# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network models for PDEs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import enum
import numpy as np
import tensorflow as tf
from typing import Callable, List, Optional, Union, Dict, Tuple, TypeVar
from pde_superresolution import duckarray # pylint: disable=g-bad-import-order
from pde_superresolution import equations # pylint: disable=g-bad-import-order
from pde_superresolution import layers # pylint: disable=g-bad-import-order
from pde_superresolution import polynomials # pylint: disable=g-bad-import-order
from pde_superresolution import weno # pylint: disable=g-bad-import-order
TensorLike = Union[tf.Tensor, np.ndarray, numbers.Number] # pylint: disable=invalid-name
FINITE_DIFF = polynomials.Method.FINITE_DIFFERENCES
FINITE_VOL = polynomials.Method.FINITE_VOLUMES
def assert_consistent_solution(
equation: equations.Equation, solution: tf.Tensor):
"""Verify that a solution is consistent with the underlying equation.
Args:
equation: equation being modeled.
solution: float32 Tensor with dimensions [batch, x].
Raises:
ValueError: if solution does not have the expected size for the equation.
"""
if equation.grid.solution_num_points != solution.shape[-1].value:
raise ValueError('solution has unexpected size for equation: {} vs {}'
.format(solution.shape[-1].value,
equation.grid.solution_num_points))
def baseline_space_derivatives(
inputs: tf.Tensor,
equation: equations.Equation,
accuracy_order: int = None) -> tf.Tensor:
"""Calculate spatial derivatives using a baseline metohd."""
assert_consistent_solution(equation, inputs)
spatial_derivatives_list = []
for derivative_name, derivative_order in zip(
equation.DERIVATIVE_NAMES, equation.DERIVATIVE_ORDERS):
if accuracy_order is None:
# use the best baseline method
assert equation.exact_type() is type(equation)
if equation.EXACT_METHOD is equations.ExactMethod.POLYNOMIAL:
grid = (0.5 + np.arange(-3, 3)) * equation.grid.solution_dx
method = FINITE_VOL if equation.CONSERVATIVE else FINITE_DIFF
derivative = polynomials.reconstruct(
inputs, grid, method, derivative_order)
elif equation.EXACT_METHOD is equations.ExactMethod.SPECTRAL:
derivative = duckarray.spectral_derivative(
inputs, derivative_order, equation.grid.period)
elif equation.EXACT_METHOD is equations.ExactMethod.WENO:
if derivative_name == 'u_minus':
derivative = duckarray.roll(
weno.reconstruct_left(inputs), 1, axis=-1)
elif derivative_name == 'u_plus':
derivative = duckarray.roll(
weno.reconstruct_right(inputs), 1, axis=-1)
else:
assert derivative_name == 'u_x'
grid = polynomials.regular_grid(
grid_offset=equation.GRID_OFFSET,
derivative_order=derivative_order,
accuracy_order=3,
dx=equation.grid.solution_dx)
method = FINITE_VOL if equation.CONSERVATIVE else FINITE_DIFF
derivative = polynomials.reconstruct(
inputs, grid, method, derivative_order)
else:
# explicit accuracy order provided
assert type(equation) not in equations.FLUX_EQUATION_TYPES
grid = polynomials.regular_grid(
grid_offset=equation.GRID_OFFSET,
derivative_order=derivative_order,
accuracy_order=accuracy_order,
dx=equation.grid.solution_dx)
method = FINITE_VOL if equation.CONSERVATIVE else FINITE_DIFF
derivative = polynomials.reconstruct(
inputs, grid, method, derivative_order)
spatial_derivatives_list.append(derivative)
return tf.stack(spatial_derivatives_list, axis=-1)
def apply_space_derivatives(
derivatives: tf.Tensor,
inputs: tf.Tensor,
equation: equations.Equation) -> tf.Tensor:
"""Combine spatial derivatives with input to calculate time derivatives.
Args:
derivatives: float32 tensor with dimensions [batch, x, derivative] giving
unnormalized spatial derivatives, e.g., as output from
predict_derivatives() or center_finite_differences().
inputs: float32 tensor with dimensions [batch, x].
equation: equation being solved.
Returns:
    Float32 Tensor with dimensions [batch, x] giving the time derivatives for
the given inputs and derivative model.
"""
derivatives_dict = {
k: derivatives[..., i] for i, k in enumerate(equation.DERIVATIVE_NAMES)
}
return equation.equation_of_motion(inputs, derivatives_dict)
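# Usage sketch (shapes are illustrative): the last axis of `derivatives` is
# matched positionally against equation.DERIVATIVE_NAMES, e.g. names like
# ('u_x', 'u_xx') for a Burgers-type equation, before being passed to the
# equation of motion.
#
#   derivatives = baseline_space_derivatives(inputs, equation)    # [batch, x, 2]
#   u_t = apply_space_derivatives(derivatives, inputs, equation)  # [batch, x]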
def integrate_ode(func: Callable[[tf.Tensor, float], tf.Tensor],
inputs: tf.Tensor,
num_time_steps: int,
time_step: float) -> tf.Tensor:
"""Integrate an equation with a fixed time-step.
Args:
func: function that can be called on (y, t) to calculate the time
derivative for tensor y at time t.
inputs: tensor with shape [batch, x] giving initial conditions to use for
time integration.
num_time_steps: integer number of time steps to integrate over.
time_step: size of each time step.
Returns:
Tensor with shape [batch, x, num_time_steps].
"""
times = np.arange(num_time_steps + 1) * time_step
result = tf.contrib.integrate.odeint_fixed(
func, inputs, times, method='midpoint')
# drop the first time step, which is exactly equal to the inputs.
return tf.transpose(result, perm=(1, 2, 0))[..., 1:]
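# A minimal usage sketch (hypothetical values): integrate du/dt = -u for ten
# fixed midpoint steps. The first time point equals the inputs and is dropped,
# so the result has shape [batch, x, num_time_steps] = [4, 32, 10].
#
#   initial = tf.ones([4, 32])
#   trajectory = integrate_ode(lambda y, t: -y, initial,
#                              num_time_steps=10, time_step=0.01)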
def baseline_time_evolution(
inputs: tf.Tensor,
num_time_steps: int,
equation: equations.Equation) -> tf.Tensor:
"""Infer time evolution from inputs with our baseline model.
Args:
inputs: float32 Tensor with dimensions [batch, x].
num_time_steps: integer number of time steps to integrate over.
equation: equation being solved.
Returns:
    Float32 Tensor with dimensions [batch, x, num_time_steps] with the
    integrated solution.
"""
def func(y, t):
del t # unused
return apply_space_derivatives(
baseline_space_derivatives(y, equation, accuracy_order=1), y, equation)
return integrate_ode(func, inputs, num_time_steps, equation.time_step)
def result_stack(space_derivatives: Union[tf.Tensor, List[tf.Tensor]],
time_derivative: tf.Tensor,
integrated_solution: tf.Tensor = None) -> tf.Tensor:
"""Combine derivatives and solutions into a single stacked result tensor.
Args:
space_derivatives: Tensor with dimensions [..., derivative], where ...
indicates any number of leading dimensions that must exactly match
time_derivative.
time_derivative: Tensor with dimensions [...].
integrated_solution: Tensor with dimensions [..., time]
Returns:
Tensor with dimensions [..., derivative+time+1].
"""
tensors = [space_derivatives, time_derivative[..., tf.newaxis]]
if integrated_solution is not None:
tensors.append(integrated_solution)
return tf.concat(tensors, axis=-1)
def result_unstack(
tensor: tf.Tensor,
equation: equations.Equation
) -> Tuple[tf.Tensor, tf.Tensor, Optional[tf.Tensor]]:
"""Separate a stacked result tensor into components.
The first len(equation.DERIVATIVE_ORDERS) components of tensor are taken
to be space derivatives, followed by time derivatives, followed by zero or
more integrated solutions.
Args:
tensor: result tensor with one or more dimensions, e.g., from
result_stack().
equation: equation being solved.
Returns:
    Tuple (space_derivatives, time_derivative, integrated_solution), where
    integrated_solution is either a tensor or None if there is no time
    integration.
"""
num_space_derivatives = len(equation.DERIVATIVE_ORDERS)
space_derivatives = tensor[..., :num_space_derivatives]
time_derivative = tensor[..., num_space_derivatives]
if tensor.shape[-1].value > num_space_derivatives + 1:
integrated_solution = tensor[..., num_space_derivatives+1:]
else:
integrated_solution = None
return (space_derivatives, time_derivative, integrated_solution)
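# Round-trip sketch: result_stack() and result_unstack() are inverses along the
# channel axis (shapes are illustrative).
#
#   stacked = result_stack(space_derivatives,    # [batch, x, num_derivatives]
#                          time_derivative,      # [batch, x]
#                          integrated_solution)  # [batch, x, num_time_steps]
#   space, time, integrated = result_unstack(stacked, equation)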
def _stack_all_rolls(inputs: tf.Tensor, max_offset: int) -> tf.Tensor:
"""Stack together all rolls of inputs, from 0 to max_offset."""
rolled = [tf.concat([inputs[i:, ...], inputs[:i, ...]], axis=0)
for i in range(max_offset)]
return tf.stack(rolled, axis=0)
def baseline_result(inputs: tf.Tensor,
equation: equations.Equation,
num_time_steps: int = 0,
accuracy_order: int = None) -> tf.Tensor:
"""Calculate derivatives and time-evolution using our baseline model.
Args:
inputs: float32 Tensor with dimensions [batch, x].
equation: equation being solved.
num_time_steps: integer number of time steps to integrate over.
accuracy_order: optional explicit accuracy order.
Returns:
Float32 Tensor with dimensions [batch, x, channel] with inferred space
derivatives, time derivative and the integrated solution.
"""
if accuracy_order is None:
equation = equation.to_exact()
elif type(equation) in equations.FLUX_EQUATION_TYPES:
equation = equation.to_conservative()
space_derivatives = baseline_space_derivatives(
inputs, equation, accuracy_order=accuracy_order)
time_derivative = apply_space_derivatives(
space_derivatives, inputs, equation)
if num_time_steps:
integrated_solution = baseline_time_evolution(
inputs, num_time_steps, equation)
else:
integrated_solution = None
return result_stack(space_derivatives, time_derivative, integrated_solution)
def apply_noise(
inputs: tf.Tensor,
probability: float = 1.0,
amplitude: float = 1.0,
filtered: bool = False,
) -> tf.Tensor:
"""Apply noise to improve robustness."""
  # The idea is to mimic the artifacts introduced by numerical integration.
keep = tf.expand_dims(tf.cast(
tf.random.uniform((tf.shape(inputs)[0],)) <= probability,
tf.float32), axis=1)
noise = tf.random.normal(tf.shape(inputs))
if filtered:
noise = noise - duckarray.smoothing_filter(noise)
return inputs + keep * amplitude * noise
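# Intended use during training (hypothetical values): roughly half the examples
# in a batch get Gaussian noise added, optionally high-pass filtered so only
# small scales are perturbed, mimicking integration artifacts.
#
#   noisy_inputs = apply_noise(coarse_inputs, probability=0.5, amplitude=0.01,
#                              filtered=True)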
def model_inputs(fine_inputs: tf.Tensor,
hparams: tf.contrib.training.HParams,
evaluation: bool = False) -> Dict[str, tf.Tensor]:
"""Create coarse model inputs from high resolution simulations.
Args:
fine_inputs: float32 Tensor with shape [batch, x] with results of
high-resolution simulations.
hparams: model hyperparameters.
evaluation: bool indicating whether to create data for evaluation or
model training.
Returns:
Dict of tensors with entries:
- 'labels': float32 Tensor with shape [batch, x//factor, derivative] with
finite difference derivatives computed at high resolution.
- 'baseline': float32 Tensor with shape [batch, x//factor, derivative] with
finite difference derivatives computed from low resolution inputs.
- 'inputs': float32 Tensor with shape [batch, x//factor] with low resolution
inputs.
"""
fine_equation, coarse_equation = equations.from_hparams(hparams)
assert fine_equation.grid.resample_factor == 1
resample_method = 'mean' if coarse_equation.CONSERVATIVE else 'subsample'
resample = duckarray.RESAMPLE_FUNCS[resample_method]
if evaluation:
ground_truth_order = None
else:
if hparams.ground_truth_order == -1:
ground_truth_order = None
else:
ground_truth_order = hparams.ground_truth_order
fine_derivatives = baseline_result(fine_inputs, fine_equation,
hparams.num_time_steps,
accuracy_order=ground_truth_order)
labels = resample(fine_derivatives, factor=hparams.resample_factor, axis=1)
coarse_inputs = resample(fine_inputs, factor=hparams.resample_factor, axis=1)
baseline = baseline_result(coarse_inputs, coarse_equation,
hparams.num_time_steps, accuracy_order=1)
if not evaluation and hparams.noise_probability:
if hparams.noise_type == 'white':
filtered = False
elif hparams.noise_type == 'filtered':
filtered = True
else:
raise ValueError('invalid noise_type: {}'.format(hparams.noise_type))
coarse_inputs = apply_noise(
coarse_inputs, hparams.noise_probability, hparams.noise_amplitude,
filtered=filtered)
return {'labels': labels, 'baseline': baseline, 'inputs': coarse_inputs}
@enum.unique
class Dataset(enum.Enum):
TRAINING = 0
VALIDATION = 1
def make_dataset(snapshots: np.ndarray,
hparams: tf.contrib.training.HParams,
dataset_type: Dataset = Dataset.TRAINING,
repeat: bool = True,
evaluation: bool = False) -> tf.data.Dataset:
"""Create a tf.data.Dataset for training or evaluation data.
Args:
snapshots: np.ndarray with shape [examples, x] with high-resolution
training data.
hparams: model hyperparameters.
dataset_type: enum indicating whether to use training or validation data.
repeat: whether to shuffle and repeat data.
evaluation: bool indicating whether to create data for evaluation or
model training.
Returns:
tf.data.Dataset containing a dictionary with three tensor values:
- 'labels': float32 Tensor with shape [batch, x//factor, derivative] with
finite difference derivatives computed at high resolution.
- 'baseline': float32 Tensor with shape [batch, x//factor, derivative] with
finite difference derivatives computed from low resolution inputs.
- 'inputs': float32 Tensor with shape [batch, x//factor] with low resolution
inputs.
"""
snapshots = np.asarray(snapshots, dtype=np.float32)
num_training = int(round(snapshots.shape[0] * hparams.frac_training))
if dataset_type is Dataset.TRAINING:
indexer = slice(None, num_training)
else:
assert dataset_type is Dataset.VALIDATION
indexer = slice(num_training, None)
dataset = tf.data.Dataset.from_tensor_slices(snapshots[indexer])
# no need to do dataset augmentation with rolling for eval
rolls_stop = 1 if evaluation else hparams.resample_factor
dataset = dataset.map(lambda x: _stack_all_rolls(x, rolls_stop))
dataset = dataset.map(lambda x: model_inputs(x, hparams, evaluation))
dataset = dataset.apply(tf.data.experimental.unbatch())
# our dataset is small enough to fit in memory and we are doing non-trivial
# preprocessing, so caching makes training *much* faster.
dataset = dataset.cache()
if repeat:
dataset = dataset.apply(
tf.contrib.data.shuffle_and_repeat(buffer_size=10000))
batch_size = hparams.base_batch_size * hparams.resample_factor
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(buffer_size=1)
return dataset
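# Minimal TF1-style usage sketch (hypothetical `snapshots` and `hparams`):
#
#   dataset = make_dataset(snapshots, hparams, dataset_type=Dataset.TRAINING)
#   batch = dataset.make_one_shot_iterator().get_next()
#   # batch['inputs']: [batch, x//factor]
#   # batch['labels']: [batch, x//factor, channel]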
_NONLINEARITIES = {
'relu': tf.nn.relu,
'relu6': tf.nn.relu6,
'tanh': tf.tanh,
'softplus': tf.nn.softplus,
'elu': tf.nn.elu,
}
def predict_coefficients(inputs: tf.Tensor,
hparams: tf.contrib.training.HParams,
reuse: object = tf.AUTO_REUSE) -> tf.Tensor:
"""Predict finite difference coefficients with a neural networks.
Args:
inputs: float32 Tensor with dimensions [batch, x].
hparams: model hyperparameters.
reuse: whether or not to reuse TensorFlow variables.
Returns:
Float32 Tensor with dimensions [batch, x, derivative, coefficient].
Raises:
ValueError: if inputs does not have the expected size for the equation.
ValueError: if polynomial accuracy constraints are infeasible.
"""
# TODO(shoyer): refactor to use layer classes to hold variables, like
# tf.keras.layers, instead of relying on reuse.
_, equation = equations.from_hparams(hparams)
assert_consistent_solution(equation, inputs)
with tf.variable_scope('predict_coefficients', reuse=reuse):
num_derivatives = len(equation.DERIVATIVE_ORDERS)
grid = polynomials.regular_grid(
equation.GRID_OFFSET, derivative_order=0,
accuracy_order=hparams.coefficient_grid_min_size,
dx=equation.grid.solution_dx)
net = inputs[:, :, tf.newaxis]
net /= equation.standard_deviation
activation = _NONLINEARITIES[hparams.nonlinearity]
for _ in range(hparams.num_layers - 1):
net = layers.conv1d_periodic_layer(net, filters=hparams.filter_size,
kernel_size=hparams.kernel_size,
activation=activation, center=True)
if not hparams.polynomial_accuracy_order:
if hparams.num_layers == 0:
raise NotImplementedError
net = layers.conv1d_periodic_layer(
net, filters=num_derivatives*grid.size,
kernel_size=hparams.kernel_size, activation=None, center=True)
new_dims = [num_derivatives, grid.size]
outputs = tf.reshape(net, tf.concat([tf.shape(inputs), new_dims], axis=0))
outputs.set_shape(inputs.shape[:2].concatenate(new_dims))
if hparams.ensure_unbiased_coefficients:
if 0 in equation.DERIVATIVE_ORDERS:
raise ValueError('ensure_unbiased not yet supported for 0th order '
'spatial derivatives')
outputs -= tf.reduce_mean(outputs, axis=-1, keepdims=True)
else:
poly_accuracy_layers = []
for derivative_order in equation.DERIVATIVE_ORDERS:
method = FINITE_VOL if equation.CONSERVATIVE else FINITE_DIFF
poly_accuracy_layers.append(
polynomials.PolynomialAccuracyLayer(
grid=grid,
method=method,
derivative_order=derivative_order,
accuracy_order=hparams.polynomial_accuracy_order,
out_scale=hparams.polynomial_accuracy_scale)
)
input_sizes = [layer.input_size for layer in poly_accuracy_layers]
if hparams.num_layers > 0:
net = layers.conv1d_periodic_layer(net, filters=sum(input_sizes),
kernel_size=hparams.kernel_size,
activation=None, center=True)
else:
initializer = tf.initializers.zeros()
coefficients = tf.get_variable(
'coefficients', (sum(input_sizes),),
initializer=initializer)
net = tf.tile(coefficients[tf.newaxis, tf.newaxis, :],
[tf.shape(inputs)[0], inputs.shape[1].value, 1])
cum_sizes = np.cumsum(input_sizes)
starts = [0] + cum_sizes[:-1].tolist()
stops = cum_sizes.tolist()
zipped = zip(starts, stops, poly_accuracy_layers)
outputs = tf.stack([layer.apply(net[..., start:stop])
for start, stop, layer in zipped], axis=-2)
assert outputs.shape.as_list()[-1] == grid.size
return outputs
def extract_patches(inputs: tf.Tensor, size: int) -> tf.Tensor:
"""Extract overlapping patches from a batch of 1D tensors.
Args:
inputs: Tensor with dimensions [batch, x].
size: number of elements to include in each patch.
Returns:
Tensor with dimensions [batch, x, size].
"""
padded_inputs = layers.pad_periodic(inputs[..., tf.newaxis],
size - 1, center=True)
extracted = tf.extract_image_patches(padded_inputs[..., tf.newaxis],
ksizes=[1, size, 1, 1],
strides=[1, 1, 1, 1],
rates=[1, 1, 1, 1],
padding='VALID')
return tf.squeeze(extracted, axis=2)
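# Shape sketch: each output position holds the periodically padded window of
# `size` neighboring values (centered via pad_periodic with center=True).
#
#   patches = extract_patches(tf.zeros([2, 64]), size=6)  # shape [2, 64, 6]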
def apply_coefficients(coefficients: tf.Tensor, inputs: tf.Tensor) -> tf.Tensor:
"""Combine coefficients and inputs to calculate spatial derivatives.
Args:
coefficients: float32 Tensor with dimensions [batch, x, derivative,
coefficient].
inputs: float32 Tensor with dimensions [batch, x].
Returns:
Tensor with dimensions [batch, x, derivative].
"""
patches = extract_patches(inputs, size=coefficients.shape[3].value)
return tf.einsum('bxdi,bxi->bxd', coefficients, patches)
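# In index notation, the einsum computes
#   output[b, x, d] = sum_i coefficients[b, x, d, i] * patches[b, x, i],
# i.e. a learned finite-difference stencil applied to the local patch around
# each point. A typical pairing (shapes illustrative):
#
#   coefficients = predict_coefficients(inputs, hparams)    # [batch, x, deriv, coef]
#   space_derivs = apply_coefficients(coefficients, inputs)  # [batch, x, deriv]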
def _multilayer_conv1d(inputs, hparams, num_targets, reuse=tf.AUTO_REUSE):
"""Apply multiple conv1d layers with input normalization."""
_, equation = equations.from_hparams(hparams)
assert_consistent_solution(equation, inputs)
net = inputs[:, :, tf.newaxis]
net /= equation.standard_deviation
activation = _NONLINEARITIES[hparams.nonlinearity]
for _ in range(hparams.num_layers - 1):
net = layers.conv1d_periodic_layer(net, filters=hparams.filter_size,
kernel_size=hparams.kernel_size,
activation=activation, center=True)
if hparams.num_layers == 0:
raise NotImplementedError('not implemented yet')
net = layers.conv1d_periodic_layer(
net, filters=num_targets, kernel_size=hparams.kernel_size,
activation=None, center=True)
return net
def predict_space_derivatives_directly(inputs, hparams, reuse=tf.AUTO_REUSE):
"""Predict finite difference coefficients directly from a neural net."""
_, equation = equations.from_hparams(hparams)
num_targets = len(equation.DERIVATIVE_ORDERS)
return _multilayer_conv1d(inputs, hparams, num_targets, reuse=reuse)
def predict_space_derivatives(
inputs: tf.Tensor,
hparams: tf.contrib.training.HParams,
reuse: object = tf.AUTO_REUSE) -> tf.Tensor:
"""Infer normalized derivatives from inputs with our forward model.
Args:
inputs: float32 Tensor with dimensions [batch, x].
hparams: model hyperparameters.
reuse: whether or not to reuse TensorFlow variables.
Returns:
Float32 Tensor with dimensions [batch, x, derivative].
"""
if hparams.model_target == 'coefficients':
coefficients = predict_coefficients(inputs, hparams, reuse=reuse)
return apply_coefficients(coefficients, inputs)
elif hparams.model_target == 'space_derivatives':
return predict_space_derivatives_directly(inputs, hparams, reuse=reuse)
else:
raise NotImplementedError(
'unrecognized model_target: {}'.format(hparams.model_target))
def predict_time_derivative_directly(inputs, hparams, reuse=tf.AUTO_REUSE):
"""Predict time derivatives directly, without using the equation of motion."""
output = _multilayer_conv1d(inputs, hparams, num_targets=1, reuse=reuse)
return tf.squeeze(output, axis=-1)
def predict_flux_directly(inputs, hparams, reuse=tf.AUTO_REUSE):
"""Predict flux directly, without using the equation of motion."""
_, equation = equations.from_hparams(hparams)
dx = equation.grid.solution_dx
output = _multilayer_conv1d(inputs, hparams, num_targets=1, reuse=reuse)
flux = tf.squeeze(output, axis=-1)
return equations.staggered_first_derivative(flux, dx)
def predict_time_derivative(
inputs: tf.Tensor,
hparams: tf.contrib.training.HParams,
reuse: object = tf.AUTO_REUSE) -> tf.Tensor:
"""Infer time evolution from inputs with our forward model.
Args:
inputs: float32 Tensor with dimensions [batch, x].
hparams: model hyperparameters.
reuse: whether or not to reuse TensorFlow variables.
Returns:
Float32 Tensor with dimensions [batch, x] with inferred time derivatives.
"""
if hparams.model_target == 'time_derivative':
return predict_time_derivative_directly(inputs, hparams, reuse=reuse)
elif hparams.model_target == 'flux':
return predict_flux_directly(inputs, hparams, reuse=reuse)
else:
space_derivatives = predict_space_derivatives(
inputs, hparams, reuse=reuse)
_, equation = equations.from_hparams(hparams)
return apply_space_derivatives(space_derivatives, inputs, equation)
def predict_time_evolution(inputs: tf.Tensor,
hparams: tf.contrib.training.HParams) -> tf.Tensor:
"""Infer time evolution from inputs with our neural network model.
Args:
inputs: float32 Tensor with dimensions [batch, x].
hparams: model hyperparameters.
Returns:
    Float32 Tensor with dimensions [batch, x, num_time_steps] with the
    integrated solution.
"""
def func(y, t):
del t # unused
return predict_time_derivative(y, hparams, reuse=True)
_, equation = equations.from_hparams(hparams)
return integrate_ode(
func, inputs, hparams.num_time_steps, equation.time_step)
def predict_result(inputs: tf.Tensor,
hparams: tf.contrib.training.HParams) -> tf.Tensor:
"""Infer predictions from inputs with our forward model.
Args:
inputs: float32 Tensor with dimensions [batch, x].
hparams: model hyperparameters.
Returns:
    Float32 Tensor with dimensions [batch, x, channel] with inferred space
    derivatives, time derivative and the integrated solution.
"""
if hparams.model_target in {'flux', 'time_derivative'}:
# use dummy values (all zeros) for space derivatives
if hparams.space_derivatives_weight:
raise ValueError('space derivatives are not predicted by model {}'
.format(hparams.model_target))
_, equation = equations.from_hparams(hparams)
num_derivatives = len(equation.DERIVATIVE_ORDERS)
space_derivatives = tf.zeros(
tf.concat([tf.shape(inputs), [num_derivatives]], axis=0))
time_derivative = predict_time_derivative(inputs, hparams)
else:
space_derivatives = predict_space_derivatives(inputs, hparams)
_, equation = equations.from_hparams(hparams)
time_derivative = apply_space_derivatives(
space_derivatives, inputs, equation)
if hparams.num_time_steps:
integrated_solution = predict_time_evolution(inputs, hparams)
else:
integrated_solution = None
return result_stack(space_derivatives, time_derivative, integrated_solution)
# TODO(shoyer): replace with TypeVar('T', np.ndarray, tf.Tensor) when pytype
# supports it (b/74212131)
T = TypeVar('T')
def abs_and_rel_error(predictions: T,
labels: T,
baseline: T,
error_floor: Union[T, float] = 1e-7) -> T:
"""Calculate absolute and relative errors.
Args:
predictions: predicted derivatives/solution, a float32 Tensor with
dimensions [batch, x, channel].
labels: actual derivatives/solution computed at high resolution, a float32
Tensor with dimensions [batch, x, channel].
baseline: baseline derivatives/solution computed with standard finite
differences from low-resolution inputs, a float32 Tensor with dimensions
[batch, x, channel].
error_floor: scalar or array with dimensions [channel] added
to baseline squared error when normalizing relative error.
Returns:
    Tuple (model_error, relative_error) of float32 Tensors with dimensions
    [batch, x, channel].
"""
# Handle cases where we use WENO for only ground truth labels or predictions
if duckarray.get_shape(baseline)[-1] < duckarray.get_shape(labels)[-1]:
labels = labels[..., 1:]
elif duckarray.get_shape(baseline)[-1] > duckarray.get_shape(labels)[-1]:
labels = tf.concat([labels[..., :1], labels], axis=-1)
model_error = (labels - predictions) ** 2
baseline_error = (labels - baseline) ** 2
relative_error = model_error / (baseline_error + error_floor)
return (model_error, relative_error)
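# Worked example with made-up numbers: if the baseline squared error at a point
# is 4e-2 and the model squared error is 1e-2, the relative error is
# 1e-2 / (4e-2 + 1e-7) ~= 0.25; values below 1 mean the model beats the
# baseline, and the floor only matters where the baseline is nearly exact.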
def loss_per_head(predictions: tf.Tensor,
labels: tf.Tensor,
baseline: tf.Tensor,
hparams: tf.contrib.training.HParams) -> tf.Tensor:
"""Calculate absolute and relative loss per training head.
Args:
predictions: predicted derivatives/solution, a float32 Tensor with
dimensions [batch, x, channel].
labels: actual derivatives/solution computed at high resolution, a float32
Tensor with dimensions [batch, x, channel].
baseline: baseline derivatives/solution computed with standard finite
differences from low-resolution inputs, a float32 Tensor with dimensions
[batch, x, channel].
hparams: model hyperparameters.
Returns:
Tensor with dimensions [abs/rel error, channel] with loss components.
"""
error_scale = np.array(hparams.error_scale).reshape(2, -1)
error_floor = np.array(hparams.error_floor)
model_error, relative_error = abs_and_rel_error(
predictions, labels, baseline, error_floor)
# dimensions [abs/rel error, channel]
stacked_mean_error = tf.stack(
[tf.reduce_mean(model_error, axis=(0, 1)),
tf.reduce_mean(relative_error, axis=(0, 1))], axis=0)
normalized_loss_per_head = stacked_mean_error * error_scale
if hparams.error_max:
normalized_loss_per_head = tf.where(
normalized_loss_per_head < hparams.error_max,
normalized_loss_per_head,
hparams.error_max * tf.ones_like(normalized_loss_per_head))
return normalized_loss_per_head
def weighted_loss(normalized_loss_per_head: tf.Tensor,
hparams: tf.contrib.training.HParams) -> tf.Tensor:
"""Calculate overall training loss.
  Weights are normalized to sum to 1.0 (`absolute_error_weight +
  relative_error_weight` and `space_derivatives_weight + time_derivative_weight
  + integrated_solution_weight`) before being used.
Args:
normalized_loss_per_head: tensor with dimensions [abs/rel error, channel].
hparams: model hyperparameters.
Returns:
Scalar float32 Tensor indicating the loss.
"""
# dimensions [abs/rel error]
abs_rel_weights = tf.convert_to_tensor(
[hparams.absolute_error_weight, hparams.relative_error_weight])
abs_rel_weights /= tf.reduce_sum(abs_rel_weights)
equation_type = equations.equation_type_from_hparams(hparams)
num_space = len(equation_type.DERIVATIVE_ORDERS)
num_integrated = normalized_loss_per_head.shape[-1].value - num_space - 1
# dimensions [channel]
weights_list = ([hparams.space_derivatives_weight / num_space] * num_space +
[hparams.time_derivative_weight])
if num_integrated:
weights_list.extend(
[hparams.integrated_solution_weight / num_integrated] * num_integrated)
channel_weights = tf.convert_to_tensor(weights_list)
channel_weights /= tf.reduce_sum(channel_weights)
# dimensions [abs/rel error, channel]
weights = abs_rel_weights[:, tf.newaxis] * channel_weights[tf.newaxis, :]
return tf.reduce_sum(weights * normalized_loss_per_head)
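# Weighting sketch with made-up hparams: absolute_error_weight=1 and
# relative_error_weight=1 give abs_rel_weights = [0.5, 0.5]. With two space
# derivatives, space_derivatives_weight=1, time_derivative_weight=1 and no
# integrated solution, channel_weights = [0.25, 0.25, 0.5] after normalization,
# and the loss is the sum of weights * normalized_loss_per_head over the
# resulting [2, 3] grid of heads.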
from __future__ import division, absolute_import
from matplotlib import rc,rcParams
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('font', weight='bold')
# rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']
import astropy.stats
import cPickle as pickle
import glob
import math
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import os
import pandas as pd
from scipy import integrate,optimize,spatial
# class Vars(object):
# size_xlabel = 48
# size_ylabel = 48
# size_text = 20
# size_tick = 24
# size_legend = 24
class Vars(object):
size_xlabel = 24
size_ylabel = 24
size_text = 18
size_tick = 18
size_legend = 18
va = Vars()
plt.rc('font', **{'family': 'serif', 'serif':['Computer Modern']})
###############################################################################
pickle_in = r"C:\Users\Hannah\Desktop\Vanderbilt_REU\Stellar_mass_env_Density"
pickle_in+= r"\Pickle_output"
###############################################################################
pickle_in_rats = pickle_in
pickle_in_rats+=r"\ratio_bands.p"
rats_vals = pickle.load(open(pickle_in_rats,"rb"))
one_dex_ratios = rats_vals[0]
two_dex_ratios = rats_vals[1]
three_dex_ratios = rats_vals[2]
one_dex_rat_dict = {1:one_dex_ratios['1_4'],5:one_dex_ratios['5_4'],\
20:one_dex_ratios['20_4']}
two_dex_rat_dict = {1:two_dex_ratios['1_4'],5:two_dex_ratios['5_4'],\
20:two_dex_ratios['20_4']}
three_dex_rat_dict = {1:three_dex_ratios['1_4'],5:three_dex_ratios['5_4'],\
20:three_dex_ratios['20_4']}
all_rat_dict = {1:one_dex_rat_dict,2:two_dex_rat_dict,3:three_dex_rat_dict}
###############################################################################
pickle_in_meds = pickle_in
pickle_in_meds+=r"\med_bands.p"
meds_vals = pickle.load(open(pickle_in_meds,"rb"))
one_dex_meds = meds_vals[0]
two_dex_meds = meds_vals[1]
three_dex_meds = meds_vals[2]
# one_dm_slim = {1:one_dex_meds['1'],5:one_dex_meds['5'],20:one_dex_meds['20']}
# two_dm_slim = {1:two_dex_meds['1'],5:two_dex_meds['5'],20:two_dex_meds['20']}
# three_dm_slim = {1:three_dex_meds['1'],5:three_dex_meds['5'],\
# 20:three_dex_meds['20']}
one_dex_meds_dict = {1:one_dex_meds['1'],5:one_dex_meds['5'],\
20:one_dex_meds['20']}
two_dex_meds_dict = {1:two_dex_meds['1'],5:two_dex_meds['5'],\
20:two_dex_meds['20']}
three_dex_meds_dict = {1:three_dex_meds['1'],5:three_dex_meds['5'],\
20:three_dex_meds['20']}
all_meds_dict = {1:one_dex_meds_dict,2:two_dex_meds_dict,3:three_dex_meds_dict}
##dictionaries with [['10', '20', '1', '3', '2', '5']] keys
##yields a list with two arrays (upper and lower bounds)
###############################################################################
pickle_in_hists = pickle_in
pickle_in_hists+=r"\hist_bands.p"
hists_vals = pickle.load(open(pickle_in_hists,"rb"))
two_dex_hists_low = hists_vals[2]
hists_dict_low = {1:two_dex_hists_low['1_4'],5:two_dex_hists_low['5_4'],\
20:two_dex_hists_low['20_4']}
two_dex_hists_high = hists_vals[3]
hists_dict_high = {1:two_dex_hists_high['1_4'],5:two_dex_hists_high['5_4'],\
20:two_dex_hists_high['20_4']}
# for ii in neigh_vals:
# for tt in range (2):
# print len(hists_dict[ii][tt])
###############################################################################
##eco_low,eco_high,eco_ratio_info, eco_final_bins,eco_medians
pickle_in_eco = pickle_in
pickle_in_eco+=r"\eco_data.p"
eco_vals = pickle.load(open(pickle_in_eco,"rb"))
eco_low_hist = eco_vals[0]
eco_high_hist = eco_vals[1]
eco_ratio = {1:eco_vals[2][0][0][4],5:eco_vals[2][3][0][4],\
20:eco_vals[2][5][0][4]}
eco_rat_err = {1:eco_vals[2][0][1][1],5:eco_vals[2][3][1][1],\
20:eco_vals[2][5][1][1]}
eco_bins = {1:eco_vals[3][0][1],5:eco_vals[3][3][1],20:eco_vals[3][5][1]}
eco_meds = {1:eco_vals[4][0],5:eco_vals[4][3],20:eco_vals[4][5]}
bins = np.arange(9.1,11.9,0.2)
bin_centers= 0.5*(bins[:-1]+bins[1:])
##eco_meds... eventually 3 arrays. First is median line, second and third are \
##low and high bootstrap
###############################################################################
pickle_in_eco_hists = pickle_in
pickle_in_eco_hists+=r"\eco_hists.p"
eco_hists = pickle.load(open(pickle_in_eco_hists,"rb"))
eco_high_counts = {1:(eco_hists[1][1][4],eco_hists[1][1]['err_4']),\
5:(eco_hists[1][5][4],eco_hists[1][5]['err_4']),\
20:(eco_hists[1][20][4],eco_hists[1][20]['err_4'])}
eco_low_counts = {1:(eco_hists[0][1][4],eco_hists[0][1]['err_4']),\
5:(eco_hists[0][5][4],eco_hists[0][5]['err_4']),\
20:(eco_hists[0][20][4],eco_hists[0][20]['err_4'])}
eco_high_bins = {1:eco_hists[3][1][4],5:eco_hists[3][5][4],\
20:eco_hists[3][20][4]}
eco_low_bins = {1:eco_hists[2][1][4],5:eco_hists[2][5][4],\
20:eco_hists[2][20][4]}
###############################################################################
# def plot_bands(bin_centers,upper,lower,ax,plot_idx,color='silver',label=None):
# """
# """
# # ax.set_yscale('symlog')
# ax.set_ylim(0,4)
# ax.set_xlim(9.1,11.8)
# ax.set_xticks(np.arange(9.5, 12., 0.5))
# ax.set_yticks([0,1,2,3,4])
# ax.tick_params(axis='both', labelsize=12)
# ax.fill_between(bin_centers,upper,lower,color=color,alpha=0.1,label=label)
# if plot_idx == 0:
# ax.legend(loc='best')
# plot_neigh_dict = {0:1,1:5,2:20}
# title = 'n = {0}'.format(plot_neigh_dict[plot_idx])
# ax.text(0.05, 0.05, title,horizontalalignment='left',\
# verticalalignment='bottom',transform=ax.transAxes,fontsize=18)
# ###############################################################################
# def plot_eco_rats(bin_centers,y_vals,y_err,neigh_val,ax,frac_val,plot_idx):
# """
# """
# if plot_idx ==1:
# ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
# ax.axhline(y=1,c="darkorchid",linewidth=0.5,zorder=0)
# ax.errorbar(bin_centers,y_vals,yerr=y_err,\
# color='deeppink',linewidth=1,label='ECO')
###############################################################################
def plot_every_rat(bin_cens,upper,lower,ax,plot_idx,neigh_val,eco_bins,\
eco_vals,eco_err,color='silver',label=None,alpha=0.1):
label_eco = None
ax.set_ylim(0,4)
ax.set_xlim(9.1,11.8)
ax.set_xticks(np.arange(9.5, 12., 0.5))
ax.set_yticks([0,1,2,3,4])
ax.tick_params(axis='both', labelsize=va.size_tick)
ax.fill_between(bin_cens,upper,lower,color=color,alpha=alpha,label=label)
plot_neigh_dict = {0:1,1:5,2:20}
title = r"\boldmath$N=%d$"%(neigh_val)
if plot_idx == 2:
ax.text(0.05, 0.05, title,horizontalalignment='left',\
verticalalignment='bottom',transform=ax.transAxes,\
fontsize=va.size_text)
# if plot_idx ==2:
if neigh_val == 5:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=va.size_xlabel)
if neigh_val == 1:
label_eco = 'ECO'
# ax.set_ylabel(r'\begin{center}$\textnormal{Ratio of Quartiles} \\ \Large (N_{Q4}/N_{Q1})$\end{center}',
# fontsize=va.size_ylabel,multialignment='center')
ax.set_ylabel(r'$\textnormal{N}_{high}\ /\ \textnormal{N}_{low}$',
fontsize=va.size_ylabel)
else:
label_eco = None
ax.axhline(y=1,c="darkorchid",linewidth=2,zorder=0)
ax.errorbar(eco_bins,eco_vals,yerr=eco_err,\
color='deeppink',linewidth=1,label=label_eco)
if neigh_val == 1:
ax.legend(loc='best',numpoints=1,fontsize=va.size_legend)
###############################################################################
nrow_num = int(1)
ncol_num = int(3)
dict_to_neigh = {1:1,5:2,20:3}
dict_to_zz = {1:0,5:1,20:2}
neigh_vals = np.array([1,5,20])
fig, axes = plt.subplots(nrows=nrow_num,ncols=ncol_num,figsize=(14,4),\
sharey=True)
# figure_title = fig.suptitle\
# (r"Abundance Ratio of Galaxies in Top/Bottom 25\% Density Regions", \
# fontsize=20)
# figure_title.set_y(1.0)
# fig.subplots_adjust(bottom=0.17, right=0.99, left=0.03,top=0.94, hspace=0, wspace=0)
scatter_dict_params = {1:['darkorchid','0.1 dex', 0.5],
2:['royalblue','0.2 dex', 0.4],
3:['violet','0.3 dex', 0.4]}
axes_flat= axes.flatten()
zz = int(0)
for yy in range(2,3):
for xx in neigh_vals:
upper = all_rat_dict[yy][xx][0]
lower = all_rat_dict[yy][xx][1]
if xx == 1:
ax_as = axes_flat[0]
if xx == 5:
ax_as = axes_flat[1]
if xx == 20:
ax_as = axes_flat[2]
# Color parameters
color, label, alpha = scatter_dict_params[yy]
# if yy == 1:
# color = 'lightpink'
# label = '0.1 dex'
# alpha = 0.5
if yy ==2:
color = 'royalblue'
label = '0.2 dex'
# label=None
alpha = 0.4
# if yy ==3:
# color = 'violet'
# label = '0.3 dex'
# alpha = 0.4
plot_every_rat(bin_centers[:-1],upper,lower,ax_as,yy,xx,\
eco_bins[xx],eco_ratio[xx],eco_rat_err[xx],\
color=color,label=label,alpha=alpha)
# zz = int(0)
# while zz == 0:
# for yy in range(2,3):
# for xx in neigh_vals:
# upper = all_rat_dict[yy][xx][0]
# lower = all_rat_dict[yy][xx][1]
# if xx == 1:
# ax_as = axes_flat[0]
# if xx == 5:
# ax_as = axes_flat[1]
# if xx == 20:
# ax_as = axes_flat[2]
# if yy == 1:
# color = 'lightpink'
# label = '0.1 dex'
# alpha = 0.5
# if yy ==2:
# color = 'royalblue'
# label = '0.2 dex'
# # label=None
# alpha = 0.4
# if yy ==3:
# color = 'violet'
# label = '0.3 dex'
# alpha = 0.4
# plot_every_rat(bin_centers[:-1],upper,lower,ax_as,yy,xx,\
# eco_bins[xx],eco_ratio[xx],eco_rat_err[xx],\
# color=color,label=label,alpha=alpha)
# zz+=1
# plt.tight_layout()
# plt.subplots_adjust(top=0.93,bottom=0.21,left=0.11,right=0.99,hspace=0.20,wspace=0.)
plt.subplots_adjust(top=0.93,bottom=0.21,left=0.06,right=0.99,hspace=0.20,\
wspace=0)
plt.show()
###############################################################################
def plot_every_med(bin_cens,upper,lower,ax,plot_idx,\
eco_vals,neigh_val,color='silver',label=None,alpha=0.1):
    # Label the ECO curve only on the first panel so the legend entry shows once.
    label_eco = 'ECO' if neigh_val == 1 else None
ax.set_yscale('symlog')
ax.set_xlim(9.1,11.8)
ax.set_xticks(np.arange(9.5, 12., 0.5))
ax.tick_params(axis='both', labelsize=va.size_tick)
ax.set_ylim(0,10**1)
ax.set_yticks(np.arange(0,12,1))
ax.set_yticklabels(np.arange(1,10,4))
# ax.fill_between(bin_cens,upper,lower,color=color,alpha=alpha,label=label)
title = r"\boldmath$N=%d$"%(neigh_val)
ybot = np.array(eco_vals[0] - eco_vals[1])
ytop = np.array(eco_vals[2] - eco_vals[0])
ax.errorbar(bin_cens,eco_vals[0],yerr=(ybot,ytop),\
color='deeppink',linewidth=1,label=label_eco)
if neigh_val == 1:
ax.set_ylabel(r'$D_{N}\ \textnormal{(Mpc)}$',fontsize = \
va.size_ylabel)
if plot_idx == 2:
ax.text(0.05, 0.05, title,horizontalalignment='left',\
verticalalignment='bottom',transform=ax.transAxes,\
fontsize=va.size_text)
    if neigh_val == 1:
        ax.legend(loc='upper left',numpoints=1,fontsize=va.size_legend)
if neigh_val == 5:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=va.size_xlabel)
###############################################################################
nrow_num = int(1)
ncol_num = int(3)
neigh_vals = np.array([1,5,20])
fig,axes = plt.subplots(nrows=nrow_num,ncols=ncol_num,figsize=(14,4),\
sharey=True,sharex=True)
# figure_title = fig.suptitle(r"Median Distance to Nth Nearest Neighbor", \
# fontsize=20)
# figure_title.set_y(1.0)
# fig.subplots_adjust(bottom=0.17, right=0.99, left=0.06,top=0.94, hspace=0, wspace=0)
axes_flat= axes.flatten()
# for yy in all_meds_dict:
for yy in range(2,3):
color, label, alpha = scatter_dict_params[yy]
for xx in neigh_vals:
upper = all_meds_dict[yy][xx][0]
lower = all_meds_dict[yy][xx][1]
if xx == 1:
ax_as = axes_flat[0]
if xx == 5:
ax_as = axes_flat[1]
if xx == 20:
ax_as = axes_flat[2]
plot_every_med(bin_centers[:-1],upper,lower,ax_as,yy,\
eco_meds[xx],xx,color=color,label=label,alpha=alpha)
# zz = int(0)
# while zz == 0:
# # for yy in all_meds_dict:
# for yy in ([1,3]):
# for xx in neigh_vals:
# upper = all_meds_dict[yy][xx][0]
# lower = all_meds_dict[yy][xx][1]
# if xx == 1:
# ax_as = axes_flat[0]
# if xx == 5:
# ax_as = axes_flat[1]
# if xx == 20:
# ax_as = axes_flat[2]
# if yy == 1:
# color = 'darkviolet'
# label = '0.1 dex'
# alpha = 0.5
# if yy == 2:
# color = 'royalblue'
# label = '0.2 dex'
# alpha = 0.3
# if yy == 3:
# color = 'springgreen'
# label = '0.3 dex'
# alpha = 0.5
# plot_every_med(bin_centers[:-1],upper,lower,ax_as,yy,\
# eco_meds[xx],xx,color=color,label=label,alpha=alpha)
# zz+=1
plt.subplots_adjust(top=0.93,bottom=0.21,left=0.06,right=0.99,hspace=0.20,\
wspace=0)
plt.show()
###############################################################################
def plot_eco_hists(bins_high,bins_low,high_counts,low_counts, \
high_counts_err,low_counts_err,ax,bin_centers,\
upper_h,lower_h,upper_l,lower_l,neigh_val):
ax.set_yscale('log')
ax.set_xticks(np.arange(9.5,12,0.5))
ax.set_xlim(9.1,11.7)
ax.tick_params(axis='both', labelsize=va.size_tick)
if neigh_val == 1:
label_high = 'High Density'
label_low = 'Low Density'
else:
label_high = None
label_low = None
# ax.fill_between(bin_centers,upper_h,lower_h,color='royalblue',\
# alpha=0.3)
# ax.fill_between(bin_centers,upper_l,lower_l,color='royalblue',\
# alpha=0.3)
ax.errorbar(bins_low,low_counts,\
yerr=low_counts_err,drawstyle='steps-mid',\
color='darkblue',label=label_low)
ax.errorbar(bins_high,high_counts,\
yerr=high_counts_err,drawstyle='steps-mid',\
color='deeppink',label=label_high)
title = r"\boldmath$N=%d$"%(neigh_val)
ax.text(0.05, 0.55, title,horizontalalignment='left',\
verticalalignment='bottom',transform=ax.transAxes,\
fontsize=va.size_text)
if neigh_val == 1:
ax.set_ylabel('Counts',fontsize=va.size_ylabel)
# (r'$\log\ \left(\frac{\textnormal{N}_{gal/bin}}{\textnormal{N}_{total}\ * \ dlogM}\right)$',\
# fontsize=24)
ax.legend(loc='best',fontsize=va.size_legend)
if neigh_val == 5:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=va.size_xlabel)
###############################################################################
eco_low_hist = eco_vals[0]
eco_high_hist = eco_vals[1]
fig,axes = plt.subplots(nrows=nrow_num,ncols=ncol_num,figsize=(14,4),\
sharey=True,sharex=True)
# figure_title = fig.suptitle(r"Abundance of Galaxies in Top/Bottom 25\% Density Regions", \
# fontsize=20)
# figure_title.set_y(1.0)
# fig.subplots_adjust(bottom=0.17, right=0.99, left=0.06,top=0.94, hspace=0, wspace=0)
axes_flat = axes.flatten()
for xx in neigh_vals:
if xx == 1:
ax_as = axes_flat[0]
if xx == 5:
ax_as = axes_flat[1]
if xx == 20:
ax_as = axes_flat[2]
plot_eco_hists(eco_high_bins[xx],eco_low_bins[xx],\
eco_high_counts[xx][0],eco_low_counts[xx][0],\
eco_high_counts[xx][1],eco_low_counts[xx][1],\
ax_as,bin_centers[:-1],hists_dict_high[xx][0],\
hists_dict_high[xx][1],hists_dict_low[xx][0],\
hists_dict_low[xx][1],xx)
# zz = int(0)
# while zz ==0:
# for xx in neigh_vals:
# if xx == 1:
# ax_as = axes_flat[0]
# if xx == 5:
# ax_as = axes_flat[1]
# if xx == 20:
# ax_as = axes_flat[2]
# plot_eco_hists(eco_high_bins[xx],eco_low_bins[xx],\
# eco_high_counts[xx][0],eco_low_counts[xx][0],\
# eco_high_counts[xx][1],eco_low_counts[xx][1],\
# ax_as,bin_centers[:-1],hists_dict_high[xx][0],\
# hists_dict_high[xx][1],hists_dict_low[xx][0],\
# hists_dict_low[xx][1],xx)
# zz+=1
plt.subplots_adjust(top=0.93,bottom=0.21,left=0.11,right=0.99,hspace=0.20,
wspace=0)
plt.show()
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field required_skills on 'Feat'
db.delete_table('dnd_feat_required_skills')
def backwards(self, orm):
# Adding M2M table for field required_skills on 'Feat'
db.create_table('dnd_feat_required_skills', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('feat', models.ForeignKey(orm['dnd.feat'], null=False)),
('skill', models.ForeignKey(orm['dnd.skill'], null=False))
))
db.create_unique('dnd_feat_required_skills', ['feat_id', 'skill_id'])
models = {
'dnd.characterclass': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'CharacterClass'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'prestige': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'})
},
'dnd.dndedition': {
'Meta': {'ordering': "['name']", 'object_name': 'DndEdition'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
'system': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'dnd.domain': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Domain'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'})
},
'dnd.feat': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Feat'},
'benefit': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'feat_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.FeatCategory']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'normal': ('django.db.models.fields.TextField', [], {}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'special': ('django.db.models.fields.TextField', [], {}),
'special_feat_prerequisites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpecialFeatPrerequisite']", 'through': "orm['dnd.FeatSpecialFeatPrerequisite']", 'symmetrical': 'False'})
},
'dnd.featcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'FeatCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'})
},
'dnd.featrequiresfeat': {
'Meta': {'object_name': 'FeatRequiresFeat'},
'additional_text': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'required_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_by_feats'", 'to': "orm['dnd.Feat']"}),
'source_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_feats'", 'to': "orm['dnd.Feat']"})
},
'dnd.featrequiresskill': {
'Meta': {'object_name': 'FeatRequiresSkill'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_skills'", 'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"})
},
'dnd.featspecialfeatprerequisite': {
'Meta': {'object_name': 'FeatSpecialFeatPrerequisite'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'special_feat_prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpecialFeatPrerequisite']"}),
'value_1': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'value_2': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'dnd.rulebook': {
'Meta': {'ordering': "['name']", 'object_name': 'Rulebook'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dnd_edition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.DndEdition']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'official_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
},
'dnd.skill': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Skill'},
'action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'armor_check_penalty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'base_skill': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'check': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'synergy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'trained_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'try_again': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'untrained': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'dnd.specialfeatprerequisite': {
'Meta': {'ordering': "['name']", 'object_name': 'SpecialFeatPrerequisite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'print_format': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'dnd.spell': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Spell'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'arcane_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'class_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.CharacterClass']", 'through': "orm['dnd.SpellClassLevel']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'descriptors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellDescriptor']", 'symmetrical': 'False'}),
'divine_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Domain']", 'through': "orm['dnd.SpellDomainLevel']", 'symmetrical': 'False'}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSchool']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'somatic_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'sub_school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSubSchool']", 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'verbal_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'xp_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'dnd.spellclasslevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('character_class', 'spell'),)", 'object_name': 'SpellClassLevel'},
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spelldescriptor': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellDescriptor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'})
},
'dnd.spelldomainlevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('domain', 'spell'),)", 'object_name': 'SpellDomainLevel'},
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Domain']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spellschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'})
},
'dnd.spellsubschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSubSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'})
},
'dnd.textfeatprerequisite': {
'Meta': {'ordering': "['text']", 'object_name': 'TextFeatPrerequisite'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['dnd']
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# opensnoop Trace open() syscalls.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: opensnoop [-h] [-T] [-x] [-p PID] [-d DURATION] [-t TID] [-n NAME]
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 17-Sep-2015 Brendan Gregg Created this.
# 29-Apr-2016 Allan McAleavy Updated for BPF_PERF_OUTPUT.
# 08-Oct-2016 Dina Goldshtein Support filtering by PID and TID.
# 28-Dec-2018 Tim Douglas Print flags argument, enable filtering
# 06-Jan-2019 Takuma Kume Support filtering by UID
from __future__ import print_function
from bcc import ArgString, BPF
from bcc.containers import filter_by_containers
from bcc.utils import printb
import argparse
from datetime import datetime, timedelta
import os
# arguments
examples = """examples:
./opensnoop # trace all open() syscalls
./opensnoop -T # include timestamps
./opensnoop -U # include UID
./opensnoop -x # only show failed opens
./opensnoop -p 181 # only trace PID 181
./opensnoop -t 123 # only trace TID 123
./opensnoop -u 1000 # only trace UID 1000
./opensnoop -d 10 # trace for 10 seconds only
./opensnoop -n main # only print process names containing "main"
./opensnoop -e # show extended fields
./opensnoop -f O_WRONLY -f O_RDWR # only print calls for writing
./opensnoop --cgroupmap mappath # only trace cgroups in this BPF map
./opensnoop --mntnsmap mappath # only trace mount namespaces in the map
"""
parser = argparse.ArgumentParser(
description="Trace open() syscalls",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-T", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-U", "--print-uid", action="store_true",
help="print UID column")
parser.add_argument("-x", "--failed", action="store_true",
help="only show failed opens")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-t", "--tid",
help="trace this TID only")
parser.add_argument("--cgroupmap",
help="trace cgroups in this BPF map only")
parser.add_argument("--mntnsmap",
help="trace mount namespaces in this BPF map only")
parser.add_argument("-u", "--uid",
help="trace this UID only")
parser.add_argument("-d", "--duration",
help="total duration of trace in seconds")
parser.add_argument("-n", "--name",
type=ArgString,
help="only print process names containing this name")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
parser.add_argument("-e", "--extended_fields", action="store_true",
help="show extended fields")
parser.add_argument("-f", "--flag_filter", action="append",
help="filter on flags argument (e.g., O_WRONLY)")
args = parser.parse_args()
debug = 0
if args.duration:
args.duration = timedelta(seconds=int(args.duration))
flag_filter_mask = 0
for flag in args.flag_filter or []:
if not flag.startswith('O_'):
exit("Bad flag: %s" % flag)
try:
flag_filter_mask |= getattr(os, flag)
except AttributeError:
exit("Bad flag: %s" % flag)
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <uapi/linux/limits.h>
#include <linux/sched.h>
struct val_t {
u64 id;
char comm[TASK_COMM_LEN];
const char *fname;
int flags; // EXTENDED_STRUCT_MEMBER
};
struct data_t {
u64 id;
u64 ts;
u32 uid;
int ret;
char comm[TASK_COMM_LEN];
char fname[NAME_MAX];
int flags; // EXTENDED_STRUCT_MEMBER
};
BPF_PERF_OUTPUT(events);
"""
bpf_text_kprobe = """
BPF_HASH(infotmp, u64, struct val_t);
int trace_return(struct pt_regs *ctx)
{
u64 id = bpf_get_current_pid_tgid();
struct val_t *valp;
struct data_t data = {};
u64 tsp = bpf_ktime_get_ns();
valp = infotmp.lookup(&id);
if (valp == 0) {
// missed entry
return 0;
}
bpf_probe_read_kernel(&data.comm, sizeof(data.comm), valp->comm);
bpf_probe_read_user(&data.fname, sizeof(data.fname), (void *)valp->fname);
data.id = valp->id;
data.ts = tsp / 1000;
data.uid = bpf_get_current_uid_gid();
data.flags = valp->flags; // EXTENDED_STRUCT_MEMBER
data.ret = PT_REGS_RC(ctx);
events.perf_submit(ctx, &data, sizeof(data));
infotmp.delete(&id);
return 0;
}
"""
bpf_text_kprobe_header_open = """
int syscall__trace_entry_open(struct pt_regs *ctx, const char __user *filename, int flags)
{
"""
bpf_text_kprobe_header_openat = """
int syscall__trace_entry_openat(struct pt_regs *ctx, int dfd, const char __user *filename, int flags)
{
"""
bpf_text_kprobe_header_openat2 = """
#include <uapi/linux/openat2.h>
int syscall__trace_entry_openat2(struct pt_regs *ctx, int dfd, const char __user *filename, struct open_how *how)
{
int flags = how->flags;
"""
bpf_text_kprobe_body = """
struct val_t val = {};
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
u32 tid = id; // Cast and get the lower part
u32 uid = bpf_get_current_uid_gid();
PID_TID_FILTER
UID_FILTER
FLAGS_FILTER
if (container_should_be_filtered()) {
return 0;
}
if (bpf_get_current_comm(&val.comm, sizeof(val.comm)) == 0) {
val.id = id;
val.fname = filename;
val.flags = flags; // EXTENDED_STRUCT_MEMBER
infotmp.update(&id, &val);
}
return 0;
};
"""
bpf_text_kfunc_header_open = """
#if defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) && !defined(__s390x__)
KRETFUNC_PROBE(FNNAME, struct pt_regs *regs, int ret)
{
const char __user *filename = (char *)PT_REGS_PARM1(regs);
int flags = PT_REGS_PARM2(regs);
#else
KRETFUNC_PROBE(FNNAME, const char __user *filename, int flags, int ret)
{
#endif
"""
bpf_text_kfunc_header_openat = """
#if defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) && !defined(__s390x__)
KRETFUNC_PROBE(FNNAME, struct pt_regs *regs, int ret)
{
int dfd = PT_REGS_PARM1(regs);
const char __user *filename = (char *)PT_REGS_PARM2(regs);
int flags = PT_REGS_PARM3(regs);
#else
KRETFUNC_PROBE(FNNAME, int dfd, const char __user *filename, int flags, int ret)
{
#endif
"""
bpf_text_kfunc_header_openat2 = """
#include <uapi/linux/openat2.h>
#if defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) && !defined(__s390x__)
KRETFUNC_PROBE(FNNAME, struct pt_regs *regs, int ret)
{
int dfd = PT_REGS_PARM1(regs);
const char __user *filename = (char *)PT_REGS_PARM2(regs);
struct open_how __user *how = (struct open_how *)PT_REGS_PARM3(regs);
int flags = how->flags;
#else
KRETFUNC_PROBE(FNNAME, int dfd, const char __user *filename, struct open_how __user *how, int ret)
{
int flags = how->flags;
#endif
"""
bpf_text_kfunc_body = """
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
u32 tid = id; // Cast and get the lower part
u32 uid = bpf_get_current_uid_gid();
PID_TID_FILTER
UID_FILTER
FLAGS_FILTER
if (container_should_be_filtered()) {
return 0;
}
struct data_t data = {};
bpf_get_current_comm(&data.comm, sizeof(data.comm));
u64 tsp = bpf_ktime_get_ns();
bpf_probe_read_user(&data.fname, sizeof(data.fname), (void *)filename);
data.id = id;
data.ts = tsp / 1000;
data.uid = bpf_get_current_uid_gid();
data.flags = flags; // EXTENDED_STRUCT_MEMBER
data.ret = ret;
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
"""
b = BPF(text='')
# open and openat are always in place since 2.6.16
fnname_open = b.get_syscall_prefix().decode() + 'open'
fnname_openat = b.get_syscall_prefix().decode() + 'openat'
fnname_openat2 = b.get_syscall_prefix().decode() + 'openat2'
if b.ksymname(fnname_openat2) == -1:
fnname_openat2 = None
is_support_kfunc = BPF.support_kfunc()
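# Prefer kfunc (BPF trampoline) probes when the kernel supports them; otherwise fall
# back to the kprobe/kretprobe program text and attach the probes explicitly below.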
if is_support_kfunc:
bpf_text += bpf_text_kfunc_header_open.replace('FNNAME', fnname_open)
bpf_text += bpf_text_kfunc_body
bpf_text += bpf_text_kfunc_header_openat.replace('FNNAME', fnname_openat)
bpf_text += bpf_text_kfunc_body
if fnname_openat2:
bpf_text += bpf_text_kfunc_header_openat2.replace('FNNAME', fnname_openat2)
bpf_text += bpf_text_kfunc_body
else:
bpf_text += bpf_text_kprobe
bpf_text += bpf_text_kprobe_header_open
bpf_text += bpf_text_kprobe_body
bpf_text += bpf_text_kprobe_header_openat
bpf_text += bpf_text_kprobe_body
if fnname_openat2:
bpf_text += bpf_text_kprobe_header_openat2
bpf_text += bpf_text_kprobe_body
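# PID_TID_FILTER, UID_FILTER and FLAGS_FILTER are placeholders in the program text;
# substitute each with concrete C filter code (or remove it) based on the arguments.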
if args.tid: # TID trumps PID
bpf_text = bpf_text.replace('PID_TID_FILTER',
'if (tid != %s) { return 0; }' % args.tid)
elif args.pid:
bpf_text = bpf_text.replace('PID_TID_FILTER',
'if (pid != %s) { return 0; }' % args.pid)
else:
bpf_text = bpf_text.replace('PID_TID_FILTER', '')
if args.uid:
bpf_text = bpf_text.replace('UID_FILTER',
'if (uid != %s) { return 0; }' % args.uid)
else:
bpf_text = bpf_text.replace('UID_FILTER', '')
bpf_text = filter_by_containers(args) + bpf_text
if args.flag_filter:
bpf_text = bpf_text.replace('FLAGS_FILTER',
'if (!(flags & %d)) { return 0; }' % flag_filter_mask)
else:
bpf_text = bpf_text.replace('FLAGS_FILTER', '')
if not (args.extended_fields or args.flag_filter):
bpf_text = '\n'.join(x for x in bpf_text.split('\n')
if 'EXTENDED_STRUCT_MEMBER' not in x)
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# initialize BPF
b = BPF(text=bpf_text)
if not is_support_kfunc:
b.attach_kprobe(event=fnname_open, fn_name="syscall__trace_entry_open")
b.attach_kretprobe(event=fnname_open, fn_name="trace_return")
b.attach_kprobe(event=fnname_openat, fn_name="syscall__trace_entry_openat")
b.attach_kretprobe(event=fnname_openat, fn_name="trace_return")
if fnname_openat2:
b.attach_kprobe(event=fnname_openat2, fn_name="syscall__trace_entry_openat2")
b.attach_kretprobe(event=fnname_openat2, fn_name="trace_return")
initial_ts = 0
# header
if args.timestamp:
print("%-14s" % ("TIME(s)"), end="")
if args.print_uid:
print("%-6s" % ("UID"), end="")
print("%-6s %-16s %4s %3s " %
("TID" if args.tid else "PID", "COMM", "FD", "ERR"), end="")
if args.extended_fields:
print("%-9s" % ("FLAGS"), end="")
print("PATH")
# process event
def print_event(cpu, data, size):
event = b["events"].event(data)
global initial_ts
# split return value into FD and errno columns
if event.ret >= 0:
fd_s = event.ret
err = 0
else:
fd_s = -1
err = - event.ret
if not initial_ts:
initial_ts = event.ts
if args.failed and (event.ret >= 0):
return
if args.name and bytes(args.name) not in event.comm:
return
if args.timestamp:
delta = event.ts - initial_ts
printb(b"%-14.9f" % (float(delta) / 1000000), nl="")
if args.print_uid:
printb(b"%-6d" % event.uid, nl="")
printb(b"%-6d %-16s %4d %3d " %
(event.id & 0xffffffff if args.tid else event.id >> 32,
event.comm, fd_s, err), nl="")
if args.extended_fields:
printb(b"%08o " % event.flags, nl="")
printb(b'%s' % event.fname)
# loop with callback to print_event
b["events"].open_perf_buffer(print_event, page_cnt=64)
start_time = datetime.now()
while not args.duration or datetime.now() - start_time < args.duration:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from azure.cosmos.cosmos_client import CosmosClient
from ..azure_common import BaseTest, arm_template, cassette_name
from c7n_azure.resources.cosmos_db import (CosmosDBChildResource, CosmosDBFirewallRulesFilter,
CosmosFirewallBypassFilter,
PORTAL_IPS, AZURE_CLOUD_IPS, THROUGHPUT_MULTIPLIER)
from c7n_azure.session import Session
from mock import patch, Mock
from netaddr import IPSet
from parameterized import parameterized
from c7n.utils import local_session
def get_ext_ip():
# local external ip needs to be added to the database when recording
from requests import get
return get('https://checkip.amazonaws.com').text.rstrip()
def get_portal_ips():
# https://docs.microsoft.com/en-us/azure/cosmos-db/how-to-configure-firewall?WT.mc_id=Portal-Microsoft_Azure_DocumentDB#connections-from-the-azure-portal
return set('104.42.195.92,40.76.54.131,52.176.6.30,52.169.50.45,52.187.184.26'.split(','))
def get_azuredc_ip():
# this means "azure datacenters only"
return '0.0.0.0'
def get_ip_rules(ip_str):
if ip_str == '':
return []
return [{'ipAddressOrRange': ip} for ip in ip_str.replace(' ', '').split(',')]
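# e.g. get_ip_rules('10.0.0.0/16, 8.8.8.8') ->
#   [{'ipAddressOrRange': '10.0.0.0/16'}, {'ipAddressOrRange': '8.8.8.8'}]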
class CosmosDBTest(BaseTest):
def setUp(self):
super(CosmosDBTest, self).setUp()
def test_cosmos_db_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-cosmos-db',
'resource': 'azure.cosmosdb'
}, validate=True)
self.assertTrue(p)
p = self.load_policy({
'name': 'test-azure-cosmos-db',
'resource': 'azure.cosmosdb-database'
}, validate=True)
self.assertTrue(p)
p = self.load_policy({
'name': 'test-azure-cosmos-db',
'resource': 'azure.cosmosdb-collection'
}, validate=True)
self.assertTrue(p)
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctestcosmosdb*'}],
'actions': [
{'type': 'set-firewall-rules',
'bypass-rules': ['Portal'],
'ip-rules': ['11.12.13.14', '21.22.23.24']
}
]
}, validate=True)
self.assertTrue(p)
@arm_template('cosmosdb.json')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctestcosmosdb*'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('cosmosdb.json')
def test_find_by_name_database(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb-database',
'filters': [
{'type': 'value',
'key': 'id',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestcdatabase'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('cosmosdb.json')
def test_find_by_name_collection(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb-collection',
'filters': [
{'type': 'value',
'key': 'id',
'op': 'eq',
'value_type': 'normalize',
'value': 'cccontainer'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('cosmosdb.json')
def test_collection_metrics_filter(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb-collection',
'filters': [
{'type': 'value',
'key': 'id',
'op': 'eq',
'value_type': 'normalize',
'value': 'cccontainer'},
{'type': 'metric',
'metric': 'ProvisionedThroughput',
'op': 'le',
'aggregation': 'average',
'interval': 'PT5M',
'threshold': 1000}
]
}, validate=True)
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('cosmosdb.json')
def test_database_metrics_filter(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb-database',
'filters': [
{'type': 'value',
'key': 'id',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestcdatabase'},
{'type': 'metric',
'metric': 'ProvisionedThroughput',
'op': 'le',
'aggregation': 'average',
'interval': 'PT5M',
'threshold': 1000}
]
}, validate=True)
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('cosmosdb.json')
@cassette_name('firewall_include')
def test_firewall_rules_include(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb',
'filters': [
{'type': 'firewall-rules',
'include': [get_ext_ip()]}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
@arm_template('cosmosdb.json')
@cassette_name('firewall_include')
def test_firewall_rules_include_cidr(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb',
'filters': [
{'type': 'firewall-rules',
'include': [get_ext_ip() + '/32']}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
@arm_template('cosmosdb.json')
@cassette_name('firewall')
def test_firewall_rules_not_equal(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb',
'filters': [
{'type': 'firewall-rules',
'equal': ['1.0.0.0/1']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources))
@arm_template('cosmosdb.json')
@cassette_name('firewall')
def test_firewall_bypass(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb',
'filters': [
{'type': 'firewall-bypass',
'mode': 'equal',
'list': ['Portal']}]
})
resources = p.run()
self.assertEqual(1, len(resources))
@arm_template('cosmosdb.json')
def test_offer_collection(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb-collection',
'filters': [
{'type': 'offer',
'key': 'content.offerThroughput',
'op': 'gt',
'value': 100}],
})
resources = p.run()
self.assertEqual(1, len(resources))
self.assertEqual('Hash', resources[0]['partitionKey']['kind'])
@arm_template('cosmosdb.json')
def test_store_throughput_state_collection_action(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb-collection',
'filters': [
{
'type': 'value',
'key': 'id',
'op': 'eq',
'value': 'cccontainer'
}
],
'actions': [
{
'type': 'save-throughput-state',
'state-tag': 'test-store-throughput'
}
]
})
collections = p.run()
self.assertEqual(len(collections), 1)
account_name = collections[0]['c7n:parent']['name']
# The tag can take longer than 60 seconds to commit
self.sleep_in_live_mode(120)
client = local_session(Session).client(
'azure.mgmt.cosmosdb.CosmosDBManagementClient')
cosmos_account = client.database_accounts.get('test_cosmosdb', account_name)
self.assertTrue('test-store-throughput' in cosmos_account.tags)
tag_value = cosmos_account.tags['test-store-throughput']
expected_throughput = collections[0]['c7n:offer']['content']['offerThroughput']
expected_scaled_throughput = int(expected_throughput / THROUGHPUT_MULTIPLIER)
expected_tag_value = '{}:{}'.format(collections[0]['_rid'], expected_scaled_throughput)
self.assertEqual(expected_tag_value, tag_value)
class CosmosDBFirewallFilterTest(BaseTest):
def test_query_firewall_disabled(self):
resource = {'properties': {'ipRules': get_ip_rules(''),
'isVirtualNetworkFilterEnabled': False}}
expected = IPSet(['0.0.0.0/0'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def test_query_block_everything(self):
resource = {'properties': {'ipRules': get_ip_rules(''),
'isVirtualNetworkFilterEnabled': True}}
expected = IPSet()
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def test_query_regular(self):
resource = {'properties': {'ipRules': get_ip_rules('10.0.0.0/16,8.8.8.8'),
'isVirtualNetworkFilterEnabled': False}}
expected = IPSet(['10.0.0.0/16', '8.8.8.8'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def test_query_regular_plus_portal(self):
extra = ','.join(PORTAL_IPS)
resource = {'properties': {'ipRules': get_ip_rules(extra + ',10.0.0.0/16,8.8.8.8'),
'isVirtualNetworkFilterEnabled': False}}
expected = IPSet(['10.0.0.0/16', '8.8.8.8'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def test_query_regular_plus_cloud(self):
extra = ', '.join(AZURE_CLOUD_IPS)
resource = {'properties': {'ipRules': get_ip_rules(extra + ',10.0.0.0/16,8.8.8.8'),
'isVirtualNetworkFilterEnabled': False}}
expected = IPSet(['10.0.0.0/16', '8.8.8.8'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def test_query_regular_plus_portal_cloud(self):
extra = ','.join(PORTAL_IPS + AZURE_CLOUD_IPS)
resource = {'properties': {'ipRules': get_ip_rules(extra + ',10.0.0.0/16,8.8.8.8'),
'isVirtualNetworkFilterEnabled': False}}
expected = IPSet(['10.0.0.0/16', '8.8.8.8'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def test_query_regular_plus_partial_cloud(self):
extra = ','.join(PORTAL_IPS[1:])
resource = {'properties': {'ipRules': get_ip_rules(extra + ',10.0.0.0/16,8.8.8.8'),
'isVirtualNetworkFilterEnabled': False}}
expected = IPSet(['10.0.0.0/16', '8.8.8.8'] + PORTAL_IPS[1:])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def _get_filter(self, mode='equal'):
data = {mode: ['10.0.0.0/8', '127.0.0.1']}
return CosmosDBFirewallRulesFilter(data, Mock())
class CosmosDBFirewallBypassFilterTest(BaseTest):
scenarios = [
[get_ip_rules(''), False, ['AzureCloud', 'Portal']],
[get_ip_rules(''), True, []],
[get_ip_rules('1.0.0.0'), True, []],
[get_ip_rules(','.join(AZURE_CLOUD_IPS)), False, ['AzureCloud']],
[get_ip_rules(','.join(PORTAL_IPS)), False, ['Portal']],
[get_ip_rules(','.join(AZURE_CLOUD_IPS + PORTAL_IPS)), False, ['AzureCloud', 'Portal']],
[get_ip_rules(','.join(AZURE_CLOUD_IPS + ['10.0.0.8'])), False, ['AzureCloud']],
[get_ip_rules(','.join(PORTAL_IPS + ['10.0.0.8'])), False, ['Portal']],
[get_ip_rules(','.join(AZURE_CLOUD_IPS + PORTAL_IPS + ['10.0.0.8'])), False,
['AzureCloud', 'Portal']],
]
@parameterized.expand(scenarios)
def test_run(self, ip_range, vnet_filter_enabled, expected):
resource = {'properties': {'ipRules': ip_range,
'isVirtualNetworkFilterEnabled': vnet_filter_enabled}}
f = CosmosFirewallBypassFilter({'mode': 'equal', 'list': []}, Mock())
self.assertEqual(expected, f._query_bypass(resource))
class CosmosDBFirewallActionTest(BaseTest):
@patch('azure.mgmt.cosmosdb.operations._database_accounts_operations.'
'DatabaseAccountsOperations.begin_create_or_update')
@arm_template('cosmosdb.json')
@cassette_name('firewall_action')
def test_set_ip_range_filter_append(self, update_mock):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctestcosmosdb*'}],
'actions': [
{'type': 'set-firewall-rules',
'append': True,
'ip-rules': ['11.12.13.14', '21.22.23.24']
}
]
})
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(1, len(update_mock.mock_calls))
name, args, kwargs = update_mock.mock_calls[0]
expected = set(['11.12.13.14', '21.22.23.24', get_ext_ip()])
expected.update(get_portal_ips())
actual = set([ip['ipAddressOrRange']
for ip in kwargs['create_update_parameters']['properties']['ipRules']])
self.assertEqual(resources[0]['resourceGroup'], args[0])
self.assertEqual(resources[0]['name'], args[1])
self.assertEqual(expected, actual)
@patch('azure.mgmt.cosmosdb.operations._database_accounts_operations.'
'DatabaseAccountsOperations.begin_create_or_update')
@arm_template('cosmosdb.json')
@cassette_name('firewall_action')
def test_set_ip_range_filter_replace(self, update_mock):
ext_ip = get_ext_ip()
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctestcosmosdb*'}],
'actions': [
{'type': 'set-firewall-rules',
'append': False,
'ip-rules': [ext_ip, '11.12.13.14', '21.22.23.24']
}
]
})
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(1, len(update_mock.mock_calls))
name, args, kwargs = update_mock.mock_calls[0]
expected = set(['11.12.13.14', '21.22.23.24', ext_ip])
expected.update(get_portal_ips())
actual = set([ip['ipAddressOrRange']
for ip in kwargs['create_update_parameters']['properties']['ipRules']])
self.assertEqual(resources[0]['resourceGroup'], args[0])
self.assertEqual(resources[0]['name'], args[1])
self.assertEqual(expected, actual)
@patch('azure.mgmt.cosmosdb.operations._database_accounts_operations.'
'DatabaseAccountsOperations.begin_create_or_update')
@arm_template('cosmosdb.json')
@cassette_name('firewall_action')
def test_set_ip_range_filter_replace_bypass(self, update_mock):
ext_ip = get_ext_ip()
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctestcosmosdb*'}],
'actions': [
{'type': 'set-firewall-rules',
'append': False,
'bypass-rules': ['Portal', 'AzureCloud'],
'ip-rules': [ext_ip, '11.12.13.14', '21.22.23.24']
}
]
})
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(1, len(update_mock.mock_calls))
name, args, kwargs = update_mock.mock_calls[0]
expected = set(['11.12.13.14', '21.22.23.24', ext_ip, get_azuredc_ip()])
expected.update(get_portal_ips())
actual = set([ip['ipAddressOrRange']
for ip in kwargs['create_update_parameters']['properties']['ipRules']])
self.assertEqual(resources[0]['resourceGroup'], args[0])
self.assertEqual(resources[0]['name'], args[1])
self.assertEqual(expected, actual)
@patch('azure.mgmt.cosmosdb.operations._database_accounts_operations.'
'DatabaseAccountsOperations.begin_create_or_update')
@arm_template('cosmosdb.json')
@cassette_name('firewall_action')
def test_set_ip_range_filter_remove_bypass(self, update_mock):
ext_ip = get_ext_ip()
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctestcosmosdb*'}],
'actions': [
{'type': 'set-firewall-rules',
'append': False,
'bypass-rules': [],
'ip-rules': [ext_ip, '21.22.23.24']
}
]
})
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(1, len(update_mock.mock_calls))
name, args, kwargs = update_mock.mock_calls[0]
self.assertEqual(resources[0]['resourceGroup'], args[0])
self.assertEqual(resources[0]['name'], args[1])
expected = set(['21.22.23.24', ext_ip])
actual = set([ip['ipAddressOrRange']
for ip in kwargs['create_update_parameters']['properties']['ipRules']])
self.assertEqual(expected, actual)
@patch('azure.mgmt.cosmosdb.operations._database_accounts_operations.'
'DatabaseAccountsOperations.begin_create_or_update')
@arm_template('cosmosdb.json')
@cassette_name('firewall_action')
def test_set_vnet_append(self, update_mock):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctestcosmosdb*'}],
'actions': [
{'type': 'set-firewall-rules',
'append': True,
'virtual-network-rules': ['id1', 'id2'],
'ip-rules': ['11.12.13.14', '21.22.23.24']
}
]
})
resources = p.run()
self.assertEqual(len(resources), 1)
name, args, kwargs = update_mock.mock_calls[0]
expected = set(['11.12.13.14', '21.22.23.24', get_ext_ip()])
expected.update(get_portal_ips())
actual = set([ip['ipAddressOrRange']
for ip in kwargs['create_update_parameters']['properties']['ipRules']])
self.assertEqual(resources[0]['resourceGroup'], args[0])
self.assertEqual(resources[0]['name'], args[1])
self.assertEqual(expected, actual)
self.assertEqual(
{'id1', 'id2'},
{r.id for r in
kwargs['create_update_parameters']['properties']['virtualNetworkRules']})
class CosmosDBThroughputActionsTest(BaseTest):
def setUp(self, *args, **kwargs):
super(CosmosDBThroughputActionsTest, self).setUp(*args, **kwargs)
self.client = local_session(Session).client(
'azure.mgmt.cosmosdb.CosmosDBManagementClient')
sub_id = local_session(Session).get_subscription_id()[-12:]
account_name = "cctestcosmosdb%s" % sub_id
key = CosmosDBChildResource.get_cosmos_key(
'test_cosmosdb', account_name, self.client, readonly=False)
self.data_client = CosmosClient(
url_connection='https://%s.documents.azure.com:443/' % account_name,
auth={
'masterKey': key
}
)
self.offer = None
def tearDown(self, *args, **kwargs):
super(CosmosDBThroughputActionsTest, self).tearDown(*args, **kwargs)
if self.offer:
self.offer['content']['offerThroughput'] = 400
self.data_client.ReplaceOffer(
self.offer['_self'],
self.offer
)
@cassette_name('test_replace_offer_collection_action')
def test_replace_offer_collection_action(self):
p = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb-collection',
'filters': [
{
'type': 'value',
'key': 'id',
'op': 'eq',
'value': 'cccontainer'
},
{
'type': 'offer',
'key': 'content.offerThroughput',
'op': 'eq',
'value': 400
}
],
'actions': [
{
'type': 'replace-offer',
'throughput': 500
}
]
})
collections = p.run()
self.offer = collections[0]['c7n:offer']
self.assertEqual(len(collections), 1)
self._assert_offer_throughput_equals(500, collections[0]['_self'])
@cassette_name('test_restore_throughput_state_updates_throughput_from_tag')
def test_restore_throughput_state_updates_throughput_from_tag(self):
p1 = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb-collection',
'filters': [
{
'type': 'value',
'key': 'id',
'op': 'eq',
'value': 'cccontainer'
}
],
'actions': [
{
'type': 'save-throughput-state',
'state-tag': 'test-restore-throughput'
}
]
})
collections = p1.run()
self.assertEqual(len(collections), 1)
collection_offer = collections[0]['c7n:offer']
self.offer = collection_offer
throughput_to_restore = collection_offer['content']['offerThroughput']
collection_offer['content']['offerThroughput'] = throughput_to_restore + 100
self.data_client.ReplaceOffer(
collection_offer['_self'],
collection_offer
)
self._assert_offer_throughput_equals(throughput_to_restore + 100, collections[0]['_self'])
p2 = self.load_policy({
'name': 'test-azure-cosmosdb',
'resource': 'azure.cosmosdb-collection',
'filters': [
{
'type': 'value',
'key': 'id',
'op': 'eq',
'value': 'cccontainer'
},
],
'actions': [
{
'type': 'restore-throughput-state',
'state-tag': 'test-restore-throughput'
}
]
})
collections = p2.run()
self.assertEqual(len(collections), 1)
self._assert_offer_throughput_equals(throughput_to_restore, collections[0]['_self'])
def _assert_offer_throughput_equals(self, throughput, resource_self):
self.sleep_in_live_mode()
offers = self.data_client.ReadOffers()
offer = next((o for o in offers if o['resource'] == resource_self), None)
self.assertIsNotNone(offer)
self.assertEqual(throughput, offer['content']['offerThroughput'])
|
|
# Copyright (c) 2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
from PciDevice import PciDevice
class EtherObject(SimObject):
type = 'EtherObject'
abstract = True
cxx_header = "dev/net/etherobject.hh"
class EtherLink(EtherObject):
type = 'EtherLink'
cxx_header = "dev/net/etherlink.hh"
int0 = SlavePort("interface 0")
int1 = SlavePort("interface 1")
delay = Param.Latency('0us', "packet transmit delay")
delay_var = Param.Latency('0ns', "packet transmit delay variability")
speed = Param.NetworkBandwidth('1Gbps', "link speed")
dump = Param.EtherDump(NULL, "dump object")
class DistEtherLink(EtherObject):
type = 'DistEtherLink'
cxx_header = "dev/net/dist_etherlink.hh"
int0 = SlavePort("interface 0")
delay = Param.Latency('0us', "packet transmit delay")
delay_var = Param.Latency('0ns', "packet transmit delay variability")
speed = Param.NetworkBandwidth('1Gbps', "link speed")
dump = Param.EtherDump(NULL, "dump object")
dist_rank = Param.UInt32('0', "Rank of this gem5 process (dist run)")
dist_size = Param.UInt32('1', "Number of gem5 processes (dist run)")
sync_start = Param.Latency('5200000000000t', "first dist sync barrier")
sync_repeat = Param.Latency('10us', "dist sync barrier repeat")
server_name = Param.String('localhost', "Message server name")
server_port = Param.UInt32('2200', "Message server port")
is_switch = Param.Bool(False, "true if this is a link in an etherswitch")
dist_sync_on_pseudo_op = Param.Bool(False, "Start sync with pseudo_op")
num_nodes = Param.UInt32('2', "Number of simulated nodes")
class EtherBus(EtherObject):
type = 'EtherBus'
cxx_header = "dev/net/etherbus.hh"
loopback = Param.Bool(True, "send packet back to the sending interface")
dump = Param.EtherDump(NULL, "dump object")
speed = Param.NetworkBandwidth('100Mbps', "bus speed in bits per second")
class EtherSwitch(EtherObject):
type = 'EtherSwitch'
cxx_header = "dev/net/etherswitch.hh"
dump = Param.EtherDump(NULL, "dump object")
fabric_speed = Param.NetworkBandwidth('10Gbps', "switch fabric speed in bits "
"per second")
interface = VectorMasterPort("Ethernet Interface")
output_buffer_size = Param.MemorySize('1MB', "size of output port buffers")
delay = Param.Latency('0us', "packet transmit delay")
delay_var = Param.Latency('0ns', "packet transmit delay variability")
time_to_live = Param.Latency('10ms', "time to live of MAC address mapping")
class EtherTap(EtherObject):
type = 'EtherTap'
cxx_header = "dev/net/ethertap.hh"
bufsz = Param.Int(10000, "tap buffer size")
dump = Param.EtherDump(NULL, "dump object")
port = Param.UInt16(3500, "tap port")
class EtherDump(SimObject):
type = 'EtherDump'
cxx_header = "dev/net/etherdump.hh"
file = Param.String("dump file")
maxlen = Param.Int(96, "max portion of packet data to dump")
class EtherDevice(PciDevice):
type = 'EtherDevice'
abstract = True
cxx_header = "dev/net/etherdevice.hh"
interface = MasterPort("Ethernet Interface")
class IGbE(EtherDevice):
# Base class for the two IGbE adapters defined below
type = 'IGbE'
cxx_header = "dev/net/i8254xGBe.hh"
hardware_address = Param.EthernetAddr(NextEthernetAddr,
"Ethernet Hardware Address")
rx_fifo_size = Param.MemorySize('384kB', "Size of the rx FIFO")
tx_fifo_size = Param.MemorySize('384kB', "Size of the tx FIFO")
rx_desc_cache_size = Param.Int(64,
"Number of enteries in the rx descriptor cache")
tx_desc_cache_size = Param.Int(64,
"Number of enteries in the rx descriptor cache")
VendorID = 0x8086
SubsystemID = 0x1008
SubsystemVendorID = 0x8086
Status = 0x0000
SubClassCode = 0x00
ClassCode = 0x02
ProgIF = 0x00
BAR0 = 0x00000000
BAR1 = 0x00000000
BAR2 = 0x00000000
BAR3 = 0x00000000
BAR4 = 0x00000000
BAR5 = 0x00000000
MaximumLatency = 0x00
MinimumGrant = 0xff
InterruptLine = 0x1e
InterruptPin = 0x01
BAR0Size = '128kB'
wb_delay = Param.Latency('10ns', "delay before desc writeback occurs")
fetch_delay = Param.Latency('10ns', "delay before desc fetch occurs")
fetch_comp_delay = Param.Latency('10ns', "delay after desc fetch occurs")
wb_comp_delay = Param.Latency('10ns', "delay after desc wb occurs")
tx_read_delay = Param.Latency('0ns', "delay after tx dma read")
rx_write_delay = Param.Latency('0ns', "delay after rx dma write")
phy_pid = Param.UInt16("Phy PID that corresponds to device ID")
phy_epid = Param.UInt16("Phy EPID that corresponds to device ID")
class IGbE_e1000(IGbE):
# Older Intel 8254x based gigabit ethernet adapter
# Uses Intel e1000 driver
DeviceID = 0x1075
phy_pid = 0x02A8
phy_epid = 0x0380
class IGbE_igb(IGbE):
# Newer Intel 8257x based gigabit ethernet adapter
# Uses Intel igb driver and in theory supports packet splitting and LRO
DeviceID = 0x10C9
phy_pid = 0x0141
phy_epid = 0x0CC0
class EtherDevBase(EtherDevice):
type = 'EtherDevBase'
abstract = True
cxx_header = "dev/net/etherdevice.hh"
hardware_address = Param.EthernetAddr(NextEthernetAddr,
"Ethernet Hardware Address")
dma_read_delay = Param.Latency('0us', "fixed delay for dma reads")
dma_read_factor = Param.Latency('0us', "multiplier for dma reads")
dma_write_delay = Param.Latency('0us', "fixed delay for dma writes")
dma_write_factor = Param.Latency('0us', "multiplier for dma writes")
rx_delay = Param.Latency('1us', "Receive Delay")
tx_delay = Param.Latency('1us', "Transmit Delay")
rx_fifo_size = Param.MemorySize('512kB', "max size of rx fifo")
tx_fifo_size = Param.MemorySize('512kB', "max size of tx fifo")
rx_filter = Param.Bool(True, "Enable Receive Filter")
intr_delay = Param.Latency('10us', "Interrupt propagation delay")
rx_thread = Param.Bool(False, "dedicated kernel thread for receive")
tx_thread = Param.Bool(False, "dedicated kernel thread for transmit")
rss = Param.Bool(False, "Receive Side Scaling")
class NSGigE(EtherDevBase):
type = 'NSGigE'
cxx_header = "dev/net/ns_gige.hh"
dma_data_free = Param.Bool(False, "DMA of Data is free")
dma_desc_free = Param.Bool(False, "DMA of Descriptors is free")
dma_no_allocate = Param.Bool(True, "Should we allocate cache on read")
VendorID = 0x100B
DeviceID = 0x0022
Status = 0x0290
SubClassCode = 0x00
ClassCode = 0x02
ProgIF = 0x00
BAR0 = 0x00000001
BAR1 = 0x00000000
BAR2 = 0x00000000
BAR3 = 0x00000000
BAR4 = 0x00000000
BAR5 = 0x00000000
MaximumLatency = 0x34
MinimumGrant = 0xb0
InterruptLine = 0x1e
InterruptPin = 0x01
BAR0Size = '256B'
BAR1Size = '4kB'
class Sinic(EtherDevBase):
type = 'Sinic'
cxx_class = 'Sinic::Device'
cxx_header = "dev/net/sinic.hh"
rx_max_copy = Param.MemorySize('1514B', "rx max copy")
tx_max_copy = Param.MemorySize('16kB', "tx max copy")
rx_max_intr = Param.UInt32(10, "max rx packets per interrupt")
rx_fifo_threshold = Param.MemorySize('384kB', "rx fifo high threshold")
rx_fifo_low_mark = Param.MemorySize('128kB', "rx fifo low threshold")
tx_fifo_high_mark = Param.MemorySize('384kB', "tx fifo high threshold")
tx_fifo_threshold = Param.MemorySize('128kB', "tx fifo low threshold")
virtual_count = Param.UInt32(1, "Virtualized SINIC")
zero_copy_size = Param.UInt32(64, "Bytes to copy if below threshold")
zero_copy_threshold = Param.UInt32(256,
"Only zero copy above this threshold")
zero_copy = Param.Bool(False, "Zero copy receive")
delay_copy = Param.Bool(False, "Delayed copy transmit")
virtual_addr = Param.Bool(False, "Virtual addressing")
VendorID = 0x1291
DeviceID = 0x1293
Status = 0x0290
SubClassCode = 0x00
ClassCode = 0x02
ProgIF = 0x00
BAR0 = 0x00000000
BAR1 = 0x00000000
BAR2 = 0x00000000
BAR3 = 0x00000000
BAR4 = 0x00000000
BAR5 = 0x00000000
MaximumLatency = 0x34
MinimumGrant = 0xb0
InterruptLine = 0x1e
InterruptPin = 0x01
BAR0Size = '64kB'
|
|
import ConfigParser
from datetime import datetime, timedelta
from boto.ec2.image import Image
from boto.ec2.instance import Instance
from boto.ec2.keypair import KeyPair
from .clcinterface import ClcInterface
# This class provides an implementation of the clcinterface that caches responses
# from the underlying clcinterface. It only makes requests to the underlying layer
# at the rate defined by pollfreq. It is assumed this will be created per-session and
# therefore will only contain data for a single user. If a more global cache is desired,
# parts of it will need to be rewritten.
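# Caching pattern: each get_all_* method refreshes its cached list only when the last
# update is older than its poll frequency; mutating calls reset the matching *Update
# timestamp to datetime.min so the next read refetches from the CLC.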
class CachingClcInterface(ClcInterface):
clc = None
zones = None
zoneUpdate = datetime.min
zoneFreq = 0
images = None
imageUpdate = datetime.min
imageFreq = 0
instances = None
instanceUpdate = datetime.min
instanceFreq = 0
addresses = None
addressUpdate = datetime.min
addressFreq = 0
keypairs = None
keypairUpdate = datetime.min
keypairFreq = 0
groups = None
groupUpdate = datetime.min
groupFreq = 0
volumes = None
volumeUpdate = datetime.min
volumeFreq = 0
snapshots = None
snapshotUpdate = datetime.min
snapshotFreq = 0
# read per-resource poll frequencies from config, falling back to the global pollfreq
def __init__(self, clcinterface, config):
self.clc = clcinterface
pollfreq = config.getint('server', 'pollfreq')
try:
self.zoneFreq = config.getint('server', 'pollfreq.zones')
except ConfigParser.NoOptionError:
self.zoneFreq = pollfreq
try:
self.imageFreq = config.getint('server', 'pollfreq.images')
except ConfigParser.NoOptionError:
self.imageFreq = pollfreq
try:
self.instanceFreq = config.getint('server', 'pollfreq.instances')
except ConfigParser.NoOptionError:
self.instanceFreq = pollfreq
try:
self.keypairFreq = config.getint('server', 'pollfreq.keypairs')
except ConfigParser.NoOptionError:
self.keypairFreq = pollfreq
try:
self.groupFreq = config.getint('server', 'pollfreq.groups')
except ConfigParser.NoOptionError:
self.groupFreq = pollfreq
try:
self.addressFreq = config.getint('server', 'pollfreq.addresses')
except ConfigParser.NoOptionError:
self.addressFreq = pollfreq
try:
self.volumeFreq = config.getint('server', 'pollfreq.volumes')
except ConfigParser.NoOptionError:
self.volumeFreq = pollfreq
try:
self.snapshotFreq = config.getint('server', 'pollfreq.snapshots')
except ConfigParser.NoOptionError:
self.snapshotFreq = pollfreq
def get_all_zones(self):
# if cache stale, update it
if (datetime.now() - self.zoneUpdate) > timedelta(seconds = self.zoneFreq):
self.zones = self.clc.get_all_zones()
self.zoneUpdate = datetime.now()
return self.zones
def get_all_images(self, owners):
# if (datetime.now() - self.imageUpdate) > timedelta(seconds = self.imageFreq):
self.images = self.clc.get_all_images(owners)
# self.imageUpdate = datetime.now()
return self.images
# returns list of image attributes
def get_image_attribute(self, image_id, attribute):
return self.clc.get_image_attribute(image_id, attribute)
# returns True if successful
def modify_image_attribute(self, image_id, attribute, operation, users, groups):
self.imageUpdate = datetime.min # invalidate cache
return self.clc.modify_image_attribute(image_id, attribute, operation, users, groups)
# returns True if successful
def reset_image_attribute(self, image_id, attribute):
self.imageUpdate = datetime.min # invalidate cache
return self.clc.reset_image_attribute(image_id, attribute)
def get_all_instances(self):
if (datetime.now() - self.instanceUpdate) > timedelta(seconds = self.instanceFreq):
self.instances = self.clc.get_all_instances()
self.instanceUpdate = datetime.now()
return self.instances
def run_instances(self, image_id, min_count=1, max_count=1,
key_name=None, security_groups=None,
user_data=None, addressing_type=None,
instance_type='m1.small', placement=None,
kernel_id=None, ramdisk_id=None,
monitoring_enabled=False, subnet_id=None,
block_device_map=None,
disable_api_termination=False,
instance_initiated_shutdown_behavior=None,
private_ip_address=None,
placement_group=None, client_token=None,
security_group_ids=None,
additional_info=None, instance_profile_name=None,
instance_profile_arn=None, tenancy=None):
self.instanceUpdate = datetime.min # invalidate cache
return self.clc.run_instances(image_id, min_count, max_count,
key_name, security_groups,
user_data, addressing_type,
instance_type, placement,
kernel_id, ramdisk_id,
monitoring_enabled, subnet_id,
block_device_map,
disable_api_termination,
instance_initiated_shutdown_behavior,
private_ip_address,
placement_group, client_token,
security_group_ids,
additional_info, instance_profile_name,
instance_profile_arn, tenancy)
# returns instance list
def terminate_instances(self, instance_ids):
self.instanceUpdate = datetime.min # invalidate cache
return self.clc.terminate_instances(instance_ids)
# returns instance list
def stop_instances(self, instance_ids, force=False):
self.instanceUpdate = datetime.min # invalidate cache
return self.clc.stop_instances(instance_ids, force)
# returns instance list
def start_instances(self, instance_ids):
self.instanceUpdate = datetime.min # invalidate cache
return self.clc.start_instances(instance_ids)
# returns instance status
def reboot_instances(self, instance_ids):
self.instanceUpdate = datetime.min # invalidate cache
return self.clc.reboot_instances(instance_ids)
# returns console output
def get_console_output(self, instance_id):
return self.clc.get_console_output(instance_id)
# returns password data
def get_password_data(self, instance_id):
return self.clc.get_password_data(instance_id)
def get_all_addresses(self):
if (datetime.now() - self.addressUpdate) > timedelta(seconds = self.addressFreq):
self.addresses = self.clc.get_all_addresses()
self.addressUpdate = datetime.now()
return self.addresses
# returns address info
def allocate_address(self):
self.addressUpdate = datetime.min # invalidate cache
return self.clc.allocate_address()
# returns True if successful
def release_address(self, publicip):
self.addressUpdate = datetime.min # invalidate cache
return self.clc.release_address(publicip)
# returns True if successful
def associate_address(self, publicip, instanceid):
self.addressUpdate = datetime.min # invalidate cache
return self.clc.associate_address(publicip, instanceid)
# returns True if successful
def disassociate_address(self, publicip):
self.addressUpdate = datetime.min # invalidate cache
return self.clc.disassociate_address(publicip)
def get_all_key_pairs(self):
if (datetime.now() - self.keypairUpdate) > timedelta(seconds = self.keypairFreq):
self.keypairs = self.clc.get_all_key_pairs()
self.keypairUpdate = datetime.now()
return self.keypairs
# returns keypair info and key
def create_key_pair(self, key_name):
self.keypairUpdate = datetime.min # invalidate cache
return self.clc.create_key_pair(key_name)
# returns nothing
def delete_key_pair(self, key_name):
self.keypairUpdate = datetime.min # invalidate cache
return self.clc.delete_key_pair(key_name)
# returns keypair info and key
def import_key_pair(self, key_name, public_key_material):
self.keypairUpdate = datetime.min # invalidate cache
return self.clc.import_key_pair(key_name, public_key_material)
def get_all_security_groups(self):
if (datetime.now() - self.groupUpdate) > timedelta(seconds = self.groupFreq):
self.groups = self.clc.get_all_security_groups()
self.groupUpdate = datetime.now()
return self.groups
# returns True if successful
def create_security_group(self, name, description):
self.groupUpdate = datetime.min # invalidate cache
return self.clc.create_security_group(name, description)
# returns True if successful
def delete_security_group(self, name=None, group_id=None):
self.groupUpdate = datetime.min # invalidate cache
return self.clc.delete_security_group(name, group_id)
# returns True if successful
def authorize_security_group(self, name=None,
src_security_group_name=None,
src_security_group_owner_id=None,
ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None, group_id=None,
src_security_group_group_id=None):
self.groupUpdate = datetime.min # invalidate cache
return self.clc.authorize_security_group(name,
src_security_group_name,
src_security_group_owner_id,
ip_protocol, from_port, to_port,
cidr_ip, group_id,
src_security_group_group_id)
# returns True if successful
def revoke_security_group(self, name=None,
src_security_group_name=None,
src_security_group_owner_id=None,
ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None, group_id=None,
src_security_group_group_id=None):
self.groupUpdate = datetime.min # invalidate cache
return self.clc.revoke_security_group(name,
src_security_group_name,
src_security_group_owner_id,
ip_protocol, from_port, to_port,
cidr_ip, group_id,
src_security_group_group_id)
def get_all_volumes(self):
if (datetime.now() - self.volumeUpdate) > timedelta(seconds = self.volumeFreq):
self.volumes = self.clc.get_all_volumes()
self.volumeUpdate = datetime.now()
return self.volumes
# returns volume info
def create_volume(self, size, availability_zone, snapshot_id):
self.volumeUpdate = datetime.min # invalidate cache
return self.clc.create_volume(size, availability_zone, snapshot_id)
# returns True if successful
def delete_volume(self, volume_id):
self.volumeUpdate = datetime.min # invalidate cache
return self.clc.delete_volume(volume_id)
# returns True if successful
def attach_volume(self, volume_id, instance_id, device):
self.volumeUpdate = datetime.min # invalidate cache
return self.clc.attach_volume(volume_id, instance_id, device)
# returns True if successful
def detach_volume(self, volume_id, force=False):
self.volumeUpdate = datetime.min # invalidate cache
return self.clc.detach_volume(volume_id, force)
def get_all_snapshots(self):
if (datetime.now() - self.snapshotUpdate) > timedelta(seconds = self.snapshotFreq):
self.snapshots = self.clc.get_all_snapshots()
self.snapshotUpdate = datetime.now()
return self.snapshots
# returns snapshot info
def create_snapshot(self, volume_id, description):
self.snapshotUpdate = datetime.min # invalidate cache
return self.clc.create_snapshot(volume_id, description)
# returns True if successful
def delete_snapshot(self, snapshot_id):
self.snapshotUpdate = datetime.min # invalidate cache
return self.clc.delete_snapshot(snapshot_id)
# returns list of snapshot attributes
def get_snapshot_attribute(self, snapshot_id, attribute):
self.snapshotUpdate = datetime.min # invalidate cache
return self.clc.get_snapshot_attribute(snapshot_id, attribute)
# returns True if successful
def modify_snapshot_attribute(self, snapshot_id, attribute, operation, users, groups):
self.snapshotUpdate = datetime.min # invalidate cache
return self.clc.modify_snapshot_attribute(snapshot_id, attribute, operation, users, groups)
# returns True if successful
def reset_snapshot_attribute(self, snapshot_id, attribute):
self.snapshotUpdate = datetime.min # invalidate cache
return self.clc.reset_snapshot_attribute(snapshot_id, attribute)
# returns True if successful
def register_image(self, name, image_location=None, description=None, architecture=None, kernel_id=None, ramdisk_id=None, root_dev_name=None, block_device_map=None):
self.imageUpdate = datetime.min # invalidate cache
return self.clc.register_image(name, image_location, description, architecture, kernel_id, ramdisk_id, root_dev_name, block_device_map)
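# A minimal usage sketch (hypothetical names; the concrete ClcInterface subclass and
# config object come from the surrounding application):
#
#   config = ConfigParser.ConfigParser()
#   config.read('console.ini')                 # must provide [server] pollfreq = <seconds>
#   clc = CachingClcInterface(some_clc_interface, config)
#   zones = clc.get_all_zones()                # first call queries the CLC
#   zones = clc.get_all_zones()                # within pollfreq.zones, served from cache
#   clc.create_key_pair('demo')                # mutating calls invalidate the keypair cache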
|
|
import numpy as np
import scipy.stats as ss
import scipy.special as sp
from .family import Family
from .flat import Flat
from .normal import Normal
from .gas_recursions import gas_recursion_cauchy_orderone, gas_recursion_cauchy_ordertwo
from .gas_recursions import gasx_recursion_cauchy_orderone, gasx_recursion_cauchy_ordertwo
from .gas_recursions import gas_llev_recursion_cauchy_orderone, gas_llev_recursion_cauchy_ordertwo
from .gas_recursions import gas_llt_recursion_cauchy_orderone, gas_llt_recursion_cauchy_ordertwo
from .gas_recursions import gas_reg_recursion_cauchy_orderone, gas_reg_recursion_cauchy_ordertwo
class Cauchy(Family):
"""
Cauchy Distribution
----
This class contains methods relating to the Cauchy distribution for time series.
"""
def __init__(self, loc=0.0, scale=1.0, transform=None, **kwargs):
"""
Parameters
----------
loc : float
Location parameter for the Cauchy distribution
scale : float
Scale (dispersion) parameter for the Cauchy distribution
transform : str
Whether to apply a transformation - e.g. 'exp' or 'logit'
"""
super(Cauchy, self).__init__(transform)
self.loc0 = loc
self.scale0 = scale
self.covariance_prior = False
self.gradient_only = kwargs.get('gradient_only', False) # used for GAS t models
if self.gradient_only is True:
self.score_function = self.first_order_score
else:
self.score_function = self.second_order_score
def approximating_model(self, beta, T, Z, R, Q, h_approx, data):
""" Creates approximating Gaussian state space model for the Cauchy measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no):
""" Creates approximating Gaussian state space model for the Cauchy measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
X: np.array
The regressors
state_no : int
Number of states
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
@staticmethod
def build_latent_variables():
""" Builds additional latent variables for this family in a probabilistic model
Returns
----------
- A list of lists (each sub-list contains latent variable information)
"""
lvs_to_build = []
lvs_to_build.append(['Cauchy Scale', Flat(transform='exp'), Normal(0, 3), 0.0])
return lvs_to_build
@staticmethod
def draw_variable(loc, scale, shape, skewness, nsims):
""" Draws random variables from this distribution
Parameters
----------
loc : float
location parameter for the distribution
scale : float
scale parameter for the distribution
shape : float
tail thickness parameter for the distribution
skewness : float
skewness parameter for the distribution
nsims : int or list
number of draws to take from the distribution
Returns
----------
- Random draws from the distribution
"""
return ss.cauchy.rvs(loc, scale, nsims)
def logpdf(self, mu):
"""
Log PDF for Cauchy prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu))
"""
if self.transform is not None:
mu = self.transform(mu)
return ss.cauchy.logpdf(mu, self.loc0, self.scale0)
def pdf(self, mu):
"""
PDF for Cauchy prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- p(mu)
"""
return ss.cauchy.pdf(mu, self.loc0, self.scale0)
@staticmethod
def markov_blanket(y, mean, scale, shape, skewness):
""" Markov blanket for each likelihood term - used for state space models
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Cauchy distribution
scale : float
scale parameter for the Cauchy distribution
shape : float
tail thickness parameter for the Cauchy distribution
skewness : float
skewness parameter for the Cauchy distribution
Returns
----------
- Markov blanket of the Cauchy family
"""
return ss.cauchy.logpdf(y, loc=mean, scale=scale)
@staticmethod
def setup():
""" Returns the attributes of this family if using in a probabilistic model
Notes
----------
- scale notes whether family has a variance parameter (sigma)
- shape notes whether family has a tail thickness parameter (nu)
- skewness notes whether family has a skewness parameter (gamma)
- mean_transform is a function which transforms the location parameter
- cythonized notes whether the family has cythonized routines
Returns
----------
- model name, link function, scale, shape, skewness, mean_transform, cythonized
"""
name = "Cauchy"
link = np.array
scale = True
shape = False
skewness = False
mean_transform = np.array
cythonized = True # used for GAS models
return name, link, scale, shape, skewness, mean_transform, cythonized
@staticmethod
def first_order_score(y, mean, scale, shape, skewness):
""" GAS t Update term using gradient only - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the t distribution
scale : float
scale parameter for the t distribution
shape : float
tail thickness parameter for the t distribution
skewness : float
skewness parameter for the t distribution
Returns
----------
- Score of the t family
"""
return (2.0*(y-mean)/(np.power(scale,2) + (np.power(y-mean,2))))
@staticmethod
def second_order_score(y, mean, scale, shape, skewness):
""" GAS t Update term potentially using second-order information - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the t distribution
scale : float
scale parameter for the t distribution
shape : float
tail thickness parameter for the t distribution
skewness : float
skewness parameter for the t distribution
Returns
----------
- Adjusted score of the t family
"""
return 2.0*(y-mean)/(np.power(scale,2) + (np.power(y-mean,2)))/(2.0*((np.power(scale,2)) - np.power(y-mean,2))/np.power((np.power(scale,2)) + np.power(y-mean,2),2))
@staticmethod
def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function for this distribution
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Cauchy distribution
scale : float
scale parameter for the Cauchy distribution
shape : float
tail thickness parameter for the Cauchy distribution
skewness : float
skewness parameter for the Cauchy distribution
Returns
----------
- Negative loglikelihood of the Cauchy family
"""
return -np.sum(ss.cauchy.logpdf(y, loc=mean, scale=scale))
@staticmethod
def reg_score_function(X, y, mean, scale, shape, skewness):
""" GAS Cauchy Regression Update term using gradient only - native Python function
Parameters
----------
X : float
datapoint for the right hand side variable
y : float
datapoint for the time series
mean : float
location parameter for the Cauchy distribution
scale : float
scale parameter for the Cauchy distribution
shape : float
tail thickness parameter for the Cauchy distribution
skewness : float
skewness parameter for the Cauchy distribution
Returns
----------
- Score of the Cauchy family
"""
return 2.0*((y-mean)*X)/(np.power(scale,2)+np.power((y-mean),2))
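# Note: the second_order_score below redefines the earlier staticmethod of the same
# name; Python keeps the later binding, so that is the definition actually used.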
@staticmethod
def second_order_score(y, mean, scale, shape, skewness):
""" GAS Cauchy Update term potentially using second-order information - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Cauchy distribution
scale : float
scale parameter for the Cauchy distribution
shape : float
tail thickness parameter for the Cauchy distribution
skewness : float
skewness parameter for the Cauchy distribution
Returns
----------
- Adjusted score of the Cauchy family
"""
return 2.0*(y-mean)/(np.power(scale,2) + (np.power(y-mean,2)))/(2.0*((np.power(scale,2)) - np.power(y-mean,2))/np.power((np.power(scale,2)) + np.power(y-mean,2),2))
# Optional Cythonized recursions below for GAS Cauchy models
@staticmethod
def gradient_recursion():
""" GAS Cauchy Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Cauchy model - gradient only
"""
return gas_recursion_cauchy_orderone
@staticmethod
def newton_recursion():
""" GAS Cauchy Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Cauchy model - adjusted score
"""
return gas_recursion_cauchy_ordertwo
@staticmethod
def gradientx_recursion():
""" GASX Cauchy Model Recursion - gradient only
Returns
----------
- Recursion function for GASX Cauchy model - gradient only
"""
return gasx_recursion_cauchy_orderone
@staticmethod
def newtonx_recursion():
""" GASX Cauchy Model Recursion - adjusted score
Returns
----------
- Recursion function for GASX Cauchy model - adjusted score
"""
return gasx_recursion_cauchy_ordertwo
@staticmethod
def gradientllev_recursion():
""" GAS Local Level Cauchy Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Local Level Cauchy model - gradient only
"""
return gas_llev_recursion_cauchy_orderone
@staticmethod
def newtonllev_recursion():
""" GAS Local Level Cauchy Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Local Level Cauchy model - adjusted score
"""
return gas_llev_recursion_cauchy_ordertwo
@staticmethod
def gradientllt_recursion():
""" GAS Local Linear Trend Cauchy Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Local Linear Trend Cauchy model - gradient only
"""
return gas_llt_recursion_cauchy_orderone
@staticmethod
def newtonllt_recursion():
""" GAS Local Linear Trend Cauchy Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Local Linear Trend Cauchy model - adjusted score
"""
return gas_llt_recursion_cauchy_ordertwo
@staticmethod
def gradientreg_recursion():
""" GAS Dynamic Regression Cauchy Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Dynamic Regression Cauchy model - gradient only
"""
return gas_reg_recursion_cauchy_orderone
@staticmethod
def newtonreg_recursion():
""" GAS Dynamic Regression Cauchy Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Dynamic Regression Cauchy model - adjusted score
"""
return gas_reg_recursion_cauchy_ordertwo
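# Illustrative sketch (not part of the original family class): the GAS
# first-order score used above, 2*(y - mean)/(scale**2 + (y - mean)**2), is the
# derivative of the Cauchy log-density with respect to its location parameter.
# The helper below is a hypothetical check that assumes this module's existing
# `np` (numpy) and `ss` (scipy.stats) imports.
def _check_cauchy_score(y=1.3, mean=0.2, scale=0.7, eps=1e-6):
    """Compare the analytic GAS score with a central finite difference."""
    analytic = 2.0*(y - mean)/(np.power(scale, 2) + np.power(y - mean, 2))
    numeric = (ss.cauchy.logpdf(y, loc=mean + eps, scale=scale)
               - ss.cauchy.logpdf(y, loc=mean - eps, scale=scale))/(2.0*eps)
    return np.isclose(analytic, numeric)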
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Libcloud Python 2.x and 3.x compatibility layer
# Some methods below are taken from the Django PY3 port, which is licensed
# under the 3-clause BSD license
# https://bitbucket.org/loewis/django-3k
# pylint: disable=import-error,no-member
from __future__ import absolute_import
import sys
import types
import unittest
DEFAULT_LXML = False
try:
if DEFAULT_LXML:
from lxml import etree as ET
else:
from xml.etree import ElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY27 = (2, 7) <= sys.version_info < (2, 8)
PY2_pre_279 = PY2 and sys.version_info < (2, 7, 9)
if PY2_pre_279:
try:
from backports.ssl_match_hostname import match_hostname, CertificateError # NOQA
except ImportError:
import warnings
warnings.warn("Missing backports.ssl_match_hostname package")
else:
# ssl module in Python >= 3.2 includes match hostname function
from ssl import match_hostname, CertificateError # NOQA
if PY3:
import http.client as httplib
from io import StringIO
import urllib
import urllib as urllib2
import base64
# pylint: disable=no-name-in-module
import urllib.parse as urlparse
import xmlrpc.client as xmlrpclib
from urllib.parse import quote as urlquote
from urllib.parse import unquote as urlunquote
from urllib.parse import urlencode as urlencode
from os.path import relpath
if sys.version_info >= (3, 5, 0):
from importlib import reload
else:
from imp import reload
from builtins import bytes
from builtins import next
parse_qs = urlparse.parse_qs
parse_qsl = urlparse.parse_qsl
basestring = str
def method_type(callable, instance, klass):
return types.MethodType(callable, instance or klass())
def b(s):
if isinstance(s, str):
return s.encode('utf-8')
elif isinstance(s, bytes):
return s
elif isinstance(s, int):
return bytes([s])
else:
raise TypeError("Invalid argument %r for b()" % (s,))
def ensure_string(s):
if isinstance(s, str):
return s
elif isinstance(s, bytes):
return s.decode('utf-8')
else:
raise TypeError("Invalid argument %r for ensure_string()" % (s,))
def byte(n):
# assume n is a Latin-1 string of length 1
return ord(n)
_real_unicode = str
u = str
def bchr(s):
"""Take an integer and make a 1-character byte string."""
return bytes([s])
def dictvalues(d):
return list(d.values())
def tostring(node):
return ET.tostring(node, encoding='unicode')
def hexadigits(s):
# s needs to be a byte string.
return [format(x, "02x") for x in s]
if sys.version_info >= (3, 1, 0):
# encodestring and decodestring has been deprecated since 3.1.0
def base64_encode_string(*args, **kwargs):
return base64.encodebytes(*args, **kwargs) # NOQA
def base64_decode_string(*args, **kwargs):
return base64.decodebytes(*args, **kwargs) # NOQA
else:
def base64_encode_string(*args, **kwargs):
return base64.encodestring(*args, **kwargs)
def base64_decode_string(*args, **kwargs):
return base64.decodestring(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
if not isinstance(self, unittest.TestCase):
raise ValueError('First argument "self" needs to be an instance '
'of unittest.TestCase')
return getattr(self, 'assertRaisesRegex')(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
if not isinstance(self, unittest.TestCase):
raise ValueError('First argument "self" needs to be an instance '
'of unittest.TestCase')
return getattr(self, 'assertRegex')(*args, **kwargs)
else:
import httplib # NOQA
from StringIO import StringIO # NOQA
import urllib # NOQA
import urllib2 # NOQA
import urlparse # NOQA
import xmlrpclib # NOQA
import base64 # NOQA
from urllib import quote as _urlquote # NOQA
from urllib import unquote as urlunquote # NOQA
from urllib import urlencode as urlencode # NOQA
from __builtin__ import reload # NOQA
parse_qs = urlparse.parse_qs
parse_qsl = urlparse.parse_qsl
from os.path import relpath # NOQA
# Save the real value of unicode because urlquote needs it to tell the
# difference between a unicode string and a byte string.
_real_unicode = unicode
basestring = unicode = str
method_type = types.MethodType
b = bytes = ensure_string = str
def byte(n):
return n
u = unicode
def bchr(s):
"""Take an integer and make a 1-character byte string."""
return chr(s)
_default_value_next = object()
def next(iterator, default=_default_value_next):
try:
return iterator.next()
except StopIteration:
if default is _default_value_next:
raise
return default
def dictvalues(d):
return d.values()
tostring = ET.tostring
def urlquote(s, safe='/'):
if isinstance(s, _real_unicode):
# Pretend to be py3 by encoding the URI automatically.
s = s.encode('utf8')
return _urlquote(s, safe)
def hexadigits(s):
# s needs to be a string.
return [x.encode("hex") for x in s]
def base64_encode_string(*args, **kwargs):
return base64.encodestring(*args, **kwargs)
def base64_decode_string(*args, **kwargs):
return base64.decodestring(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
if not isinstance(self, unittest.TestCase):
raise ValueError('First argument "self" needs to be an instance '
'of unittest.TestCase')
return getattr(self, 'assertRaisesRegexp')(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
if not isinstance(self, unittest.TestCase):
raise ValueError('First argument "self" needs to be an instance '
'of unittest.TestCase')
return getattr(self, 'assertRegexpMatches')(*args, **kwargs)
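# Illustrative sketch (not part of the original module): the helpers defined
# above aim to make byte/text handling behave identically on Python 2 and 3.
# The hypothetical function below only touches names defined in this module
# (`b`, `ensure_string`, `urlquote`).
def _compat_examples():
    """Return (expression, value) pairs demonstrating the compat helpers."""
    return [
        ("b('abc')", b('abc')),                                # text -> bytes
        ("ensure_string(b('abc'))", ensure_string(b('abc'))),  # bytes -> text
        ("urlquote('a b/c')", urlquote('a b/c')),              # '/' kept safe
    ]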
|
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for matting."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.image import matting
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import shape
from tensorflow_graphics.util import test_case
def _laplacian_matrix(image, size=3, eps=1e-5, name="matting_laplacian_matrix"):
"""Generates the closed form matting Laplacian matrices.
Generates the closed form matting Laplacian as proposed by Levin et
al. in "A Closed Form Solution to Natural Image Matting".
Args:
image: A tensor of shape `[B, H, W, C]`.
size: An `int` representing the size of the patches used to enforce
smoothness.
eps: A small number of type `float` to regularize the problem.
name: A name for this op. Defaults to "matting_laplacian_matrix".
Returns:
A tensor of shape `[B, H, W, size^2, size^2]` containing the
matting Laplacian matrices.
Raises:
ValueError: If `image` is not of rank 4.
"""
with tf.name_scope(name):
image = tf.convert_to_tensor(value=image)
shape.check_static(image, has_rank=4)
if size % 2 == 0:
raise ValueError("The patch size is expected to be an odd value.")
pixels = size**2
channels = tf.shape(input=image)[-1]
dtype = image.dtype
patches = tf.image.extract_patches(
image,
sizes=(1, size, size, 1),
strides=(1, 1, 1, 1),
rates=(1, 1, 1, 1),
padding="VALID")
batches = tf.shape(input=patches)[:-1]
new_shape = tf.concat((batches, (pixels, channels)), axis=-1)
patches = tf.reshape(patches, shape=new_shape)
mean = tf.reduce_mean(input_tensor=patches, axis=-2, keepdims=True)
demean = patches - mean
covariance = tf.matmul(demean, demean, transpose_a=True) / pixels
regularizer = (eps / pixels) * tf.eye(channels, dtype=dtype)
covariance_inv = tf.linalg.inv(covariance + regularizer)
covariance_inv = asserts.assert_no_infs_or_nans(covariance_inv)
mat = tf.matmul(tf.matmul(demean, covariance_inv), demean, transpose_b=True)
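    # Per patch, this is the closed form matting Laplacian of Levin et al.:
    # L = I - (1 + (p_i - mu)^T (Sigma + eps/|w| I)^{-1} (p_j - mu)) / |w|,
    # where |w| = size^2 is the number of pixels in the patch.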
return tf.eye(pixels, dtype=dtype) - (1.0 + mat) / pixels
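# Illustrative sketch (not part of the test suite): for a single 5x5 RGB image
# and 3x3 patches, the reference implementation above produces one 9x9 matting
# Laplacian per valid patch location, i.e. a tensor of shape [1, 3, 3, 9, 9].
def _toy_laplacian_example():
  image = tf.random.uniform((1, 5, 5, 3), minval=0.0, maxval=1.0)
  # With VALID padding there are 3x3 patch locations, each holding 9 pixels.
  return _laplacian_matrix(image, size=3)  # shape [1, 3, 3, 9, 9]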
class MattingTest(test_case.TestCase):
@parameterized.parameters((3, 1), (3, 3), (5, 3), (5, 1))
def test_build_matrices_jacobian_random(self, size, channels):
"""Tests the Jacobian of the build_matrices function."""
tensor_shape = np.random.randint(size, 6, size=3)
image_init = np.random.uniform(
0.0, 1.0, size=tensor_shape.tolist() + [channels])
with self.subTest(name="laplacian"):
self.assert_jacobian_is_correct_fn(
lambda image: matting.build_matrices(image, size=size)[0],
[image_init])
with self.subTest(name="pseudo_inverse"):
self.assert_jacobian_is_correct_fn(
lambda image: matting.build_matrices(image, size=size)[1],
[image_init])
@parameterized.parameters((3, 1), (3, 3), (5, 3), (5, 1))
def test_build_matrices_laplacian_zero_rows_and_columns(self, size, channels):
"""Tests that the laplacian matrix rows and columns sum to zero."""
tensor_shape = np.random.randint(size, 6, size=3)
image_init = np.random.uniform(
0.0, 1.0, size=tensor_shape.tolist() + [channels])
image = tf.convert_to_tensor(value=image_init)
laplacian, _ = matting.build_matrices(image, size=size)
rows = tf.reduce_sum(input_tensor=laplacian, axis=-2)
columns = tf.reduce_sum(input_tensor=laplacian, axis=-1)
with self.subTest(name="rows"):
self.assertAllClose(rows, tf.zeros_like(rows))
with self.subTest(name="columns"):
self.assertAllClose(columns, tf.zeros_like(columns))
@parameterized.parameters((3, 1), (3, 3), (5, 3), (5, 1))
def test_build_matrices_laplacian_versions(self, size, channels):
"""Compares two ways of computing the laplacian matrix."""
tensor_shape = np.random.randint(size, 6, size=3)
image_init = np.random.uniform(
0.0, 1.0, size=tensor_shape.tolist() + [channels])
image = tf.convert_to_tensor(value=image_init)
laplacian_v1, _ = matting.build_matrices(image, size=size)
laplacian_v2 = _laplacian_matrix(image, size=size)
self.assertAllClose(laplacian_v1, laplacian_v2)
@parameterized.parameters(
(3, (None, None, None, 1)),
(3, (None, None, None, 3)),
(5, (None, None, None, 1)),
(5, (None, None, None, 3)),
(3, (1, 3, 3, 1)),
(3, (1, 3, 3, 3)),
(5, (1, 5, 5, 1)),
(5, (1, 5, 5, 3)),
)
def test_build_matrices_not_raised(self, size, *shapes):
"""Tests that the shape exceptions are not raised."""
build_matrices = lambda image: matting.build_matrices(image, size=size)
self.assert_exception_is_not_raised(build_matrices, shapes)
@parameterized.parameters(
("tensor must have a rank of 4, but it has rank", 3, (1,)),
("tensor must have a rank of 4, but it has rank", 3, (1, 1, 1, 1, 1)),
("The patch size is expected to be an odd value.", 2, (1, 1, 1, 1)),
)
def test_build_matrices_raised(self, error_msg, size, *shapes):
"""Tests that the shape exceptions are properly raised."""
build_matrices = lambda image: matting.build_matrices(image, size=size)
self.assert_exception_is_raised(build_matrices, error_msg, shapes)
@parameterized.parameters((3,), (5,))
def test_linear_coefficients_jacobian_random(self, size):
"""Tests the Jacobian of the linear_coefficients function."""
tensor_shape = np.random.randint(size, 6, size=3)
matte_init = np.random.uniform(0.0, 1.0, size=tensor_shape.tolist() + [1])
tensor_shape[1:3] -= (size - 1)
num_coeffs = np.random.randint(2, 4)
pseudo_inverse_init = np.random.uniform(
0.0, 1.0, size=tensor_shape.tolist() + [num_coeffs, size**2])
def a_fn(matte, pseudo_inverse):
a, _ = matting.linear_coefficients(matte, pseudo_inverse)
return a
def b_fn(matte, pseudo_inverse):
_, b = matting.linear_coefficients(matte, pseudo_inverse)
return b
with self.subTest(name="a"):
self.assert_jacobian_is_correct_fn(a_fn,
[matte_init, pseudo_inverse_init])
with self.subTest(name="b"):
self.assert_jacobian_is_correct_fn(b_fn,
[matte_init, pseudo_inverse_init])
@parameterized.parameters(
((None, None, None, 1), (None, None, None, 4, 9)),
((None, None, None, 1), (None, None, None, 2, 25)),
((1, 6, 6, 1), (1, 4, 4, 2, 9)),
((1, 10, 10, 1), (1, 6, 6, 2, 25)),
)
def test_linear_coefficients_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(matting.linear_coefficients, shapes)
@parameterized.parameters(
("must have exactly 1 dimensions in axis -1", (1, 6, 6, 2),
(1, 4, 4, 2, 9)),
("Not all batch dimensions are identical.", (1, 6, 6, 1),
(2, 4, 4, 2, 9)),
)
def test_linear_coefficients_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(matting.linear_coefficients, error_msg,
shapes)
@parameterized.parameters((3,), (5,))
def test_linear_coefficients_reconstruction_same_images(self, size):
"""Tests that the matte can be reconstructed by using the coefficients ."""
tensor_shape = np.random.randint(size, 6, size=3).tolist()
image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1])
_, pseudo_inverse = matting.build_matrices(image, size=size)
a, b = matting.linear_coefficients(image, pseudo_inverse)
reconstructed = matting.reconstruct(image, a, b)
self.assertAllClose(image, reconstructed, atol=1e-4)
@parameterized.parameters((3,), (5,))
def test_linear_coefficients_reconstruction_opposite_images(self, size):
"""Tests that the matte can be reconstructed by using the coefficients ."""
tensor_shape = np.random.randint(size, 6, size=3).tolist()
image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1])
_, pseudo_inverse = matting.build_matrices(image, size=size)
a, b = matting.linear_coefficients(1.0 - image, pseudo_inverse)
reconstructed = matting.reconstruct(image, a, b)
self.assertAllClose(1.0 - image, reconstructed, atol=1e-4)
@parameterized.parameters((3,), (5,))
def test_loss_jacobian_random(self, size):
"""Tests the Jacobian of the matting loss function."""
tensor_shape = np.random.randint(size, 6, size=3)
matte_init = np.random.uniform(0.0, 1.0, size=tensor_shape.tolist() + [1])
tensor_shape[1:3] -= (size - 1)
laplacian_init = np.random.uniform(
0.0, 1.0, size=tensor_shape.tolist() + [size**2, size**2])
with self.subTest(name="matte"):
self.assert_jacobian_is_correct_fn(matting.loss,
[matte_init, laplacian_init])
@parameterized.parameters(
((None, None, None, 1), (None, None, None, 9, 9)),
((None, None, None, 1), (None, None, None, 25, 25)),
((1, 6, 6, 1), (1, 4, 4, 9, 9)),
((1, 10, 10, 1), (1, 6, 6, 25, 25)),
)
def test_loss_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(matting.loss, shapes)
@parameterized.parameters(
("must have exactly 1 dimensions in axis -1", (1, 6, 6, 2),
(1, 4, 4, 9, 9)),
("must have exactly 9 dimensions in axis -2", (1, 6, 6, 1),
(1, 4, 4, 1, 9)),
("Not all batch dimensions are identical.", (1, 6, 6, 1),
(2, 4, 4, 9, 9)),
)
def test_loss_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(matting.loss, error_msg, shapes)
@parameterized.parameters((3,), (5,))
def test_loss_opposite_images(self, size):
"""Tests that passing opposite images results in a loss close to 0.0."""
tensor_shape = np.random.randint(size, 6, size=3).tolist()
image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1])
laplacian, _ = matting.build_matrices(image, size=size)
loss = matting.loss(1.0 - image, laplacian)
self.assertAllClose(loss, 0.0, atol=1e-4)
@parameterized.parameters((3,), (5,))
def test_loss_same_images(self, size):
"""Tests that passing same images results in a loss close to 0.0."""
tensor_shape = np.random.randint(size, 6, size=3).tolist()
image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1])
laplacian, _ = matting.build_matrices(image, size=size)
loss = matting.loss(image, laplacian)
self.assertAllClose(loss, 0.0, atol=1e-4)
@parameterized.parameters((3,), (5,))
def test_loss_positive(self, size):
"""Tests that the loss is always greater or equal to 0.0."""
tensor_shape = np.random.randint(size, 6, size=3).tolist()
image = tf.random.uniform(minval=0.0, maxval=1.0, shape=tensor_shape + [3])
matte = tf.random.uniform(minval=0.0, maxval=1.0, shape=tensor_shape + [1])
laplacian, _ = matting.build_matrices(image, size=size)
loss = matting.loss(matte, laplacian)
self.assertAllGreaterEqual(loss, 0.0)
@parameterized.parameters((1,), (3,))
def test_reconstruct_jacobian_random(self, channels):
"""Tests the Jacobian of the reconstruct function."""
tensor_shape = np.random.randint(1, 5, size=3).tolist()
image_init = np.random.uniform(0.0, 1.0, size=tensor_shape + [channels])
mul_init = np.random.uniform(0.0, 1.0, size=tensor_shape + [channels])
add_init = np.random.uniform(0.0, 1.0, size=tensor_shape + [1])
self.assert_jacobian_is_correct_fn(matting.reconstruct,
[image_init, mul_init, add_init])
@parameterized.parameters(
((None, None, None, 3), (None, None, None, 3), (None, None, None, 1)),
((1, 6, 6, 3), (1, 6, 6, 3), (1, 6, 6, 1)),
)
def test_reconstruct_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(matting.reconstruct, shapes)
@parameterized.parameters(
("tensor must have a rank of 4, but it has rank", (1, 6, 6), (1, 6, 6, 2),
(1, 6, 6, 1)),
("tensor must have a rank of 4, but it has rank", (1, 6, 6, 2), (1, 6, 6),
(1, 6, 6, 1)),
("tensor must have a rank of 4, but it has rank", (1, 6, 6, 2),
(1, 6, 6, 2), (1, 6, 6)),
("must have exactly 1 dimensions in axis -1", (1, 6, 6, 2), (1, 6, 6, 2),
(1, 6, 6, 2)),
("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 4),
(1, 6, 6, 1)),
("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 4, 6, 1),
(1, 6, 6, 1)),
("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 1),
(1, 4, 6, 1)),
("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 4, 1),
(1, 6, 6, 1)),
("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 1),
(1, 6, 4, 1)),
("Not all batch dimensions are identical.", (1, 6, 6, 1), (4, 6, 6, 1),
(1, 6, 6, 1)),
("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 1),
(4, 6, 6, 1)),
)
def test_reconstruct_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(matting.reconstruct, error_msg, shapes)
if __name__ == "__main__":
test_case.main()
|
|
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import rest_client
from cinder.volume.drivers.huawei import smartx
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
huawei_opt = [
cfg.StrOpt('cinder_huawei_conf_file',
default='/etc/cinder/cinder_huawei_conf.xml',
help='The configuration file for the Cinder Huawei '
'driver.')]
CONF = cfg.CONF
CONF.register_opts(huawei_opt)
class HuaweiBaseDriver(driver.VolumeDriver):
def __init__(self, *args, **kwargs):
super(HuaweiBaseDriver, self).__init__(*args, **kwargs)
self.configuration = kwargs.get('configuration', None)
if not self.configuration:
msg = _('_instantiate_driver: configuration not found.')
raise exception.InvalidInput(reason=msg)
self.configuration.append_config_values(huawei_opt)
self.xml_file_path = self.configuration.cinder_huawei_conf_file
def do_setup(self, context):
"""Instantiate common class and login storage system."""
self.restclient = rest_client.RestClient(self.configuration)
return self.restclient.login()
def check_for_setup_error(self):
"""Check configuration file."""
return huawei_utils.check_conf_file(self.xml_file_path)
def get_volume_stats(self, refresh=False):
"""Get volume status."""
return self.restclient.update_volume_stats()
@utils.synchronized('huawei', external=True)
def create_volume(self, volume):
"""Create a volume."""
opts = huawei_utils.get_volume_params(volume)
smartx_opts = smartx.SmartX().get_smartx_specs_opts(opts)
params = huawei_utils.get_lun_params(self.xml_file_path,
smartx_opts)
pool_name = volume_utils.extract_host(volume['host'],
level='pool')
pools = self.restclient.find_all_pools()
pool_info = self.restclient.find_pool_info(pool_name, pools)
if not pool_info:
msg = (_('Error in getting pool information for the pool: %s.')
% pool_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
volume_name = huawei_utils.encode_name(volume['id'])
volume_description = volume['name']
volume_size = huawei_utils.get_volume_size(volume)
LOG.info(_LI(
'Create volume: %(volume)s, size: %(size)s.'),
{'volume': volume_name,
'size': volume_size})
params['pool_id'] = pool_info['ID']
params['volume_size'] = volume_size
params['volume_description'] = volume_description
# Prepare LUN parameters.
lun_param = huawei_utils.init_lun_parameters(volume_name, params)
# Create LUN on the array.
lun_info = self.restclient.create_volume(lun_param)
lun_id = lun_info['ID']
qos = huawei_utils.get_volume_qos(volume)
if qos:
smart_qos = smartx.SmartQos(self.restclient)
smart_qos.create_qos(qos, lun_id)
smartpartition = smartx.SmartPartition(self.restclient)
smartpartition.add(opts, lun_id)
smartcache = smartx.SmartCache(self.restclient)
smartcache.add(opts, lun_id)
return {'provider_location': lun_info['ID'],
'ID': lun_id,
'lun_info': lun_info}
@utils.synchronized('huawei', external=True)
def delete_volume(self, volume):
"""Delete a volume.
Three steps:
Firstly, remove associate from lungroup.
Secondly, remove associate from QoS policy.
Thirdly, remove the lun.
"""
name = huawei_utils.encode_name(volume['id'])
lun_id = volume.get('provider_location', None)
LOG.info(_LI('Delete volume: %(name)s, array lun id: %(lun_id)s.'),
{'name': name, 'lun_id': lun_id},)
if lun_id:
if self.restclient.check_lun_exist(lun_id):
qos_id = self.restclient.get_qosid_by_lunid(lun_id)
if qos_id:
self.remove_qos_lun(lun_id, qos_id)
self.restclient.delete_lun(lun_id)
else:
LOG.warning(_LW("Can't find lun %s on the array."), lun_id)
return False
return True
def remove_qos_lun(self, lun_id, qos_id):
lun_list = self.restclient.get_lun_list_in_qos(qos_id)
lun_count = len(lun_list)
if lun_count <= 1:
qos = smartx.SmartQos(self.restclient)
qos.delete_qos(qos_id)
else:
self.restclient.remove_lun_from_qos(lun_id,
lun_list,
qos_id)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot.
        We use LUNcopy to copy a new volume from the snapshot.
        The time needed increases with the volume size.
"""
snapshotname = huawei_utils.encode_name(snapshot['id'])
snapshot_id = snapshot.get('provider_location', None)
if snapshot_id is None:
snapshot_id = self.restclient.get_snapshotid_by_name(snapshotname)
if snapshot_id is None:
err_msg = (_(
'create_volume_from_snapshot: Snapshot %(name)s '
'does not exist.')
% {'name': snapshotname})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
lun_info = self.create_volume(volume)
tgt_lun_id = lun_info['ID']
luncopy_name = huawei_utils.encode_name(volume['id'])
LOG.info(_LI(
'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, '
'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.'),
{'src_lun_id': snapshot_id,
'tgt_lun_id': tgt_lun_id,
'copy_name': luncopy_name})
event_type = 'LUNReadyWaitInterval'
wait_interval = huawei_utils.get_wait_interval(self.xml_file_path,
event_type)
def _volume_ready():
result = self.restclient.get_lun_info(tgt_lun_id)
if result['HEALTHSTATUS'] == constants.STATUS_HEALTH:
if result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY:
return True
return False
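        # Poll _volume_ready every `wait_interval` seconds; the final argument
        # appears to cap the total wait at ten polling intervals.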
huawei_utils.wait_for_condition(self.xml_file_path,
_volume_ready,
wait_interval,
wait_interval * 10)
self._copy_volume(volume, luncopy_name,
snapshot_id, tgt_lun_id)
return {'provider_location': lun_info['ID'],
'lun_info': lun_info}
def create_cloned_volume(self, volume, src_vref):
"""Clone a new volume from an existing volume."""
# Form the snapshot structure.
snapshot = {'id': uuid.uuid4().__str__(), 'volume_id': src_vref['id']}
# Create snapshot.
self.create_snapshot(snapshot)
try:
# Create volume from snapshot.
lun_info = self.create_volume_from_snapshot(volume, snapshot)
finally:
try:
# Delete snapshot.
self.delete_snapshot(snapshot)
except exception.VolumeBackendAPIException:
LOG.warning(_LW(
'Failure deleting the snapshot %(snapshot_id)s '
'of volume %(volume_id)s.'),
{'snapshot_id': snapshot['id'],
'volume_id': src_vref['id']},)
return {'provider_location': lun_info['ID'],
'lun_info': lun_info}
@utils.synchronized('huawei', external=True)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
volume_size = huawei_utils.get_volume_size(volume)
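        # Convert the requested size from GiB into 512-byte sectors, the
        # capacity unit used when talking to the array.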
new_volume_size = int(new_size) * units.Gi / 512
volume_name = huawei_utils.encode_name(volume['id'])
LOG.info(_LI(
'Extend volume: %(volumename)s, oldsize:'
' %(oldsize)s newsize: %(newsize)s.'),
{'volumename': volume_name,
'oldsize': volume_size,
'newsize': new_volume_size},)
lun_id = self.restclient.get_volume_by_name(volume_name)
if lun_id is None:
msg = (_(
"Can't find lun info on the array, lun name is: %(name)s.")
% {'name': volume_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
luninfo = self.restclient.extend_volume(lun_id, new_volume_size)
return {'provider_location': luninfo['ID'],
'lun_info': luninfo}
@utils.synchronized('huawei', external=True)
def create_snapshot(self, snapshot):
snapshot_info = self.restclient.create_snapshot(snapshot)
snapshot_id = snapshot_info['ID']
self.restclient.activate_snapshot(snapshot_id)
return {'provider_location': snapshot_info['ID'],
'lun_info': snapshot_info}
@utils.synchronized('huawei', external=True)
def delete_snapshot(self, snapshot):
snapshotname = huawei_utils.encode_name(snapshot['id'])
volume_name = huawei_utils.encode_name(snapshot['volume_id'])
LOG.info(_LI(
'stop_snapshot: snapshot name: %(snapshot)s, '
'volume name: %(volume)s.'),
{'snapshot': snapshotname,
'volume': volume_name},)
snapshot_id = snapshot.get('provider_location', None)
if snapshot_id is None:
snapshot_id = self.restclient.get_snapshotid_by_name(snapshotname)
if snapshot_id is not None:
if self.restclient.check_snapshot_exist(snapshot_id):
self.restclient.stop_snapshot(snapshot_id)
self.restclient.delete_snapshot(snapshot_id)
else:
LOG.warning(_LW("Can't find snapshot on the array."))
else:
LOG.warning(_LW("Can't find snapshot on the array."))
return False
return True
@utils.synchronized('huawei', external=True)
def initialize_connection_fc(self, volume, connector):
wwns = connector['wwpns']
volume_name = huawei_utils.encode_name(volume['id'])
LOG.info(_LI(
'initialize_connection_fc, initiator: %(wwpns)s,'
' volume name: %(volume)s.'),
{'wwpns': wwns,
'volume': volume_name},)
host_name_before_hash = None
host_name = connector['host']
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
host_name_before_hash = host_name
host_name = six.text_type(hash(host_name))
# Create hostgroup if not exist.
host_id = self.restclient.add_host_with_check(host_name,
host_name_before_hash)
# Add host into hostgroup.
hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)
free_wwns = self.restclient.get_connected_free_wwns()
LOG.info(_LI("initialize_connection_fc, the array has free wwns: %s."),
free_wwns)
for wwn in wwns:
if wwn in free_wwns:
self.restclient.add_fc_port_to_host(host_id, wwn)
lun_id = self.restclient.mapping_hostgroup_and_lungroup(volume_name,
hostgroup_id,
host_id)
host_lun_id = self.restclient.find_host_lun_id(host_id, lun_id)
tgt_port_wwns = []
for wwn in wwns:
tgtwwpns = self.restclient.get_fc_target_wwpns(wwn)
if tgtwwpns:
tgt_port_wwns.append(tgtwwpns)
init_targ_map = {}
for initiator in wwns:
init_targ_map[initiator] = tgt_port_wwns
# Return FC properties.
info = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': int(host_lun_id),
'target_discovered': True,
'target_wwn': tgt_port_wwns,
'volume_id': volume['id'],
'initiator_target_map': init_targ_map}, }
LOG.info(_LI("initialize_connection_fc, return data is: %s."),
info)
return info
@utils.synchronized('huawei', external=True)
def initialize_connection_iscsi(self, volume, connector):
"""Map a volume to a host and return target iSCSI information."""
LOG.info(_LI('Enter initialize_connection_iscsi.'))
initiator_name = connector['initiator']
volume_name = huawei_utils.encode_name(volume['id'])
LOG.info(_LI(
'initiator name: %(initiator_name)s, '
'volume name: %(volume)s.'),
{'initiator_name': initiator_name,
'volume': volume_name})
(iscsi_iqns,
target_ips,
portgroup_id) = self.restclient.get_iscsi_params(self.xml_file_path,
connector)
LOG.info(_LI('initialize_connection_iscsi, iscsi_iqn: %(iscsi_iqn)s, '
'target_ip: %(target_ip)s, '
'portgroup_id: %(portgroup_id)s.'),
{'iscsi_iqn': iscsi_iqns,
'target_ip': target_ips,
'portgroup_id': portgroup_id},)
# Create hostgroup if not exist.
host_name = connector['host']
host_name_before_hash = None
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
host_name_before_hash = host_name
host_name = six.text_type(hash(host_name))
host_id = self.restclient.add_host_with_check(host_name,
host_name_before_hash)
# Add initiator to the host.
self.restclient.ensure_initiator_added(self.xml_file_path,
initiator_name,
host_id)
hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)
# Mapping lungroup and hostgroup to view.
lun_id = self.restclient.mapping_hostgroup_and_lungroup(volume_name,
hostgroup_id,
host_id,
portgroup_id)
hostlun_id = self.restclient.find_host_lun_id(host_id, lun_id)
LOG.info(_LI("initialize_connection_iscsi, host lun id is: %s."),
hostlun_id)
iscsi_conf = huawei_utils.get_iscsi_conf(self.xml_file_path)
chapinfo = self.restclient.find_chap_info(iscsi_conf,
initiator_name)
# Return iSCSI properties.
properties = {}
properties['target_discovered'] = False
properties['volume_id'] = volume['id']
multipath = connector.get('multipath', False)
hostlun_id = int(hostlun_id)
if not multipath:
properties['target_portal'] = ('%s:3260' % target_ips[0])
properties['target_iqn'] = iscsi_iqns[0]
properties['target_lun'] = hostlun_id
else:
properties['target_iqns'] = [iqn for iqn in iscsi_iqns]
properties['target_portals'] = [
'%s:3260' % ip for ip in target_ips]
properties['target_luns'] = [hostlun_id] * len(target_ips)
# If use CHAP, return CHAP info.
if chapinfo:
chap_username, chap_password = chapinfo.split(';')
properties['auth_method'] = 'CHAP'
properties['auth_username'] = chap_username
properties['auth_password'] = chap_password
LOG.info(_LI("initialize_connection_iscsi success. Return data: %s."),
properties)
return {'driver_volume_type': 'iscsi', 'data': properties}
@utils.synchronized('huawei', external=True)
def terminate_connection_iscsi(self, volume, connector):
"""Delete map between a volume and a host."""
initiator_name = connector['initiator']
volume_name = huawei_utils.encode_name(volume['id'])
lun_id = volume.get('provider_location', None)
host_name = connector['host']
LOG.info(_LI(
'terminate_connection_iscsi: volume name: %(volume)s, '
'initiator name: %(ini)s, '
'lun_id: %(lunid)s.'),
{'volume': volume_name,
'ini': initiator_name,
'lunid': lun_id},)
iscsi_conf = huawei_utils.get_iscsi_conf(self.xml_file_path)
portgroup = None
portgroup_id = None
left_lunnum = -1
for ini in iscsi_conf['Initiator']:
if ini['Name'] == initiator_name:
for key in ini:
if key == 'TargetPortGroup':
portgroup = ini['TargetPortGroup']
break
# Remove lun from lungroup.
if lun_id:
if self.restclient.check_lun_exist(lun_id):
# Get lungroup id by lun id.
lungroup_id = self.restclient.get_lungroupid_by_lunid(lun_id)
if lungroup_id:
self.restclient.remove_lun_from_lungroup(lungroup_id,
lun_id)
else:
LOG.warning(_LW("Can't find lun on the array."))
# Remove portgroup from mapping view if no lun left in lungroup.
if portgroup:
portgroup_id = self.restclient.find_tgt_port_group(portgroup)
host_id = self.restclient.find_host(host_name)
if host_id:
mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
view_id = self.restclient.find_mapping_view(mapping_view_name)
if view_id:
lungroup_id = self.restclient.find_lungroup_from_map(view_id)
if lungroup_id:
left_lunnum = self.restclient.get_lunnum_from_lungroup(lungroup_id)
if portgroup_id and view_id and (int(left_lunnum) <= 0):
if self.restclient.is_portgroup_associated_to_view(view_id,
portgroup_id):
self.restclient.delete_portgroup_mapping_view(view_id,
portgroup_id)
if view_id and (int(left_lunnum) <= 0):
self.restclient.remove_chap(initiator_name)
if self.restclient.lungroup_associated(view_id, lungroup_id):
self.restclient.delete_lungroup_mapping_view(view_id,
lungroup_id)
self.restclient.delete_lungroup(lungroup_id)
if self.restclient.is_initiator_associated_to_host(initiator_name):
self.restclient.remove_iscsi_from_host(initiator_name)
hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
hostgroup_id = self.restclient.find_hostgroup(hostgroup_name)
if hostgroup_id:
if self.restclient.hostgroup_associated(view_id, hostgroup_id):
self.restclient.delete_hostgoup_mapping_view(view_id,
hostgroup_id)
self.restclient.remove_host_from_hostgroup(hostgroup_id,
host_id)
self.restclient.delete_hostgroup(hostgroup_id)
self.restclient.remove_host(host_id)
self.restclient.delete_mapping_view(view_id)
def terminate_connection_fc(self, volume, connector):
"""Delete map between a volume and a host."""
wwns = connector['wwpns']
volume_name = huawei_utils.encode_name(volume['id'])
lun_id = volume.get('provider_location', None)
host_name = connector['host']
left_lunnum = -1
LOG.info(_LI('terminate_connection_fc: volume name: %(volume)s, '
'wwpns: %(wwns)s, '
'lun_id: %(lunid)s.'),
{'volume': volume_name,
'wwns': wwns,
'lunid': lun_id},)
if lun_id:
if self.restclient.check_lun_exist(lun_id):
# Get lungroup id by lun id.
lungroup_id = self.restclient.get_lungroupid_by_lunid(lun_id)
if not lungroup_id:
LOG.info(_LI("Can't find lun in lungroup."))
else:
self.restclient.remove_lun_from_lungroup(lungroup_id,
lun_id)
else:
LOG.warning(_LW("Can't find lun on the array."))
tgt_port_wwns = []
for wwn in wwns:
tgtwwpns = self.restclient.get_fc_target_wwpns(wwn)
if tgtwwpns:
tgt_port_wwns.append(tgtwwpns)
init_targ_map = {}
for initiator in wwns:
init_targ_map[initiator] = tgt_port_wwns
host_id = self.restclient.find_host(host_name)
if host_id:
mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
view_id = self.restclient.find_mapping_view(mapping_view_name)
if view_id:
lungroup_id = self.restclient.find_lungroup_from_map(view_id)
if lungroup_id:
left_lunnum = self.restclient.get_lunnum_from_lungroup(lungroup_id)
if int(left_lunnum) > 0:
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
else:
info = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': tgt_port_wwns,
'initiator_target_map': init_targ_map}, }
return info
def migrate_volume(self, context, volume, host):
return (False, None)
def create_export(self, context, volume, connector):
"""Export a volume."""
pass
def ensure_export(self, context, volume):
"""Synchronously recreate an export for a volume."""
pass
def remove_export(self, context, volume):
"""Remove an export for a volume."""
pass
def _copy_volume(self, volume, copy_name, src_lun, tgt_lun):
luncopy_id = self.restclient.create_luncopy(copy_name,
src_lun, tgt_lun)
event_type = 'LUNcopyWaitInterval'
wait_interval = huawei_utils.get_wait_interval(self.xml_file_path,
event_type)
try:
self.restclient.start_luncopy(luncopy_id)
def _luncopy_complete():
luncopy_info = self.restclient.get_luncopy_info(luncopy_id)
if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY:
                    # luncopy_info['status'] refers to the running status of
                    # the LUNcopy. A value of '40' means the LUNcopy has
                    # finished and the copy is completely ready.
return True
elif luncopy_info['state'] != constants.STATUS_HEALTH:
                    # luncopy_info['state'] refers to the health status of the
                    # LUNcopy. Any value other than '1' means an error occurred
                    # during the LUNcopy operation and we should abort it.
err_msg = (_(
'An error occurred during the LUNcopy operation. '
'LUNcopy name: %(luncopyname)s. '
'LUNcopy status: %(luncopystatus)s. '
'LUNcopy state: %(luncopystate)s.')
% {'luncopyname': luncopy_id,
'luncopystatus': luncopy_info['status'],
'luncopystate': luncopy_info['state']},)
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
huawei_utils.wait_for_condition(self.xml_file_path,
_luncopy_complete,
wait_interval)
except Exception:
with excutils.save_and_reraise_exception():
self.restclient.delete_luncopy(luncopy_id)
self.delete_volume(volume)
self.restclient.delete_luncopy(luncopy_id)
class Huawei18000ISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver):
"""ISCSI driver for Huawei OceanStor 18000 storage arrays.
Version history:
1.0.0 - Initial driver
1.1.0 - Provide Huawei OceanStor 18000 storage volume driver
1.1.1 - Code refactor
CHAP support
Multiple pools support
ISCSI multipath support
SmartX support
"""
VERSION = "1.1.1"
def __init__(self, *args, **kwargs):
super(Huawei18000ISCSIDriver, self).__init__(*args, **kwargs)
def get_volume_stats(self, refresh=False):
"""Get volume status."""
data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['storage_protocol'] = 'iSCSI'
data['driver_version'] = self.VERSION
data['vendor_name'] = 'Huawei'
return data
def initialize_connection(self, volume, connector):
return HuaweiBaseDriver.initialize_connection_iscsi(self,
volume,
connector)
def terminate_connection(self, volume, connector, **kwargs):
return HuaweiBaseDriver.terminate_connection_iscsi(self,
volume,
connector)
class Huawei18000FCDriver(HuaweiBaseDriver, driver.FibreChannelDriver):
"""FC driver for Huawei OceanStor 18000 storage arrays.
Version history:
1.0.0 - Initial driver
1.1.0 - Provide Huawei OceanStor 18000 storage volume driver
1.1.1 - Code refactor
Multiple pools support
SmartX support
"""
VERSION = "1.1.1"
def __init__(self, *args, **kwargs):
super(Huawei18000FCDriver, self).__init__(*args, **kwargs)
def get_volume_stats(self, refresh=False):
"""Get volume status."""
data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['storage_protocol'] = 'FC'
data['driver_version'] = self.VERSION
        data['vendor_name'] = 'Huawei'
return data
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
return HuaweiBaseDriver.initialize_connection_fc(self,
volume,
connector)
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
return HuaweiBaseDriver.terminate_connection_fc(self,
volume,
connector)
|
|
#!/usr/bin/env python
"""
"**Pycco**" is a Python port of [Docco](http://jashkenas.github.com/docco/):
the original quick-and-dirty, hundred-line-long, literate-programming-style
documentation generator. It produces HTML that displays your comments
alongside your code. Comments are passed through
[Markdown](http://daringfireball.net/projects/markdown/syntax) and
[SmartyPants](http://daringfireball.net/projects/smartypants), while code is
passed through [Pygments](http://pygments.org/) for syntax highlighting.
This page is the result of running Pycco against its own source file.
If you install Pycco, you can run it from the command-line:
pycco src/*.py
This will generate linked HTML documentation for the named source files,
saving it into a `docs` folder by default.
The [source for Pycco](https://github.com/fitzgen/pycco) is available on GitHub,
and released under the MIT license.
To install Pycco, simply
pip install pycco
Or, to install the latest source
git clone git://github.com/fitzgen/pycco.git
cd pycco
python setup.py install
"""
# === Main Documentation Generation Functions ===
def generate_documentation(source, outdir=None, preserve_paths=True,
language=None):
"""
Generate the documentation for a source file by reading it in, splitting it
up into comment/code sections, highlighting them for the appropriate
language, and merging them into an HTML template.
"""
if not outdir:
raise TypeError("Missing the required 'outdir' keyword argument.")
code = open(source, "r").read()
language = get_language(source, code, language=language)
sections = parse(source, code, language)
highlight(source, sections, language, preserve_paths=preserve_paths, outdir=outdir)
return generate_html(source, sections, preserve_paths=preserve_paths, outdir=outdir)
def parse(source, code, language):
"""
Given a string of source code, parse out each comment and the code that
follows it, and create an individual **section** for it.
Sections take the form:
{ "docs_text": ...,
"docs_html": ...,
"code_text": ...,
"code_html": ...,
"num": ...
}
"""
lines = code.split("\n")
sections = []
has_code = docs_text = code_text = ""
if lines[0].startswith("#!"):
lines.pop(0)
if language["name"] == "python":
for linenum, line in enumerate(lines[:2]):
if re.search(r'coding[:=]\s*([-\w.]+)', lines[linenum]):
lines.pop(linenum)
break
def save(docs, code):
if docs or code:
sections.append({
"docs_text": docs,
"code_text": code
})
# Setup the variables to get ready to check for multiline comments
multi_line = False
multi_line_delimiters = [language.get("multistart"), language.get("multiend")]
for line in lines:
# Only go into multiline comments section when one of the delimiters is
# found to be at the start of a line
if all(multi_line_delimiters) and any([line.lstrip().startswith(delim) or line.rstrip().endswith(delim) for delim in multi_line_delimiters]):
if not multi_line:
multi_line = True
else:
multi_line = False
if (multi_line
and line.strip().endswith(language.get("multiend"))
and len(line.strip()) > len(language.get("multiend"))):
multi_line = False
# Get rid of the delimiters so that they aren't in the final docs
line = line.replace(language["multistart"], '')
line = line.replace(language["multiend"], '')
docs_text += line.strip() + '\n'
indent_level = re.match("\s*", line).group(0)
if has_code and docs_text.strip():
save(docs_text, code_text[:-1])
code_text = code_text.split('\n')[-1]
has_code = docs_text = ''
elif multi_line:
# Remove leading spaces
if re.match(r' {%d}' % len(indent_level), line):
docs_text += line[len(indent_level):] + '\n'
else:
docs_text += line + '\n'
elif re.match(language["comment_matcher"], line):
if has_code:
save(docs_text, code_text)
has_code = docs_text = code_text = ''
docs_text += re.sub(language["comment_matcher"], "", line) + "\n"
else:
if code_text and any([line.lstrip().startswith(x) for x in ['class ', 'def ', '@']]):
if not code_text.lstrip().startswith("@"):
save(docs_text, code_text)
code_text = has_code = docs_text = ''
has_code = True
code_text += line + '\n'
save(docs_text, code_text)
return sections
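# Illustrative sketch (not part of the original Pycco source): feeding `parse`
# a tiny snippet yields one section per comment/code pair. This hypothetical
# helper only uses names defined in this module (`parse`, `languages`).
def _parse_example():
    code = "# Say hello\ndef greet():\n    return 'hi'\n"
    # Expected: a single section whose "docs_text" is "Say hello\n" and whose
    # "code_text" holds the function definition.
    return parse("example.py", code, languages[".py"])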
# === Preprocessing the comments ===
def preprocess(comment, section_nr, preserve_paths=True, outdir=None):
"""
Add cross-references before having the text processed by markdown. It's
    possible to reference another file, like this: `[[main.py]]`, which renders
[[main.py]]. You can also reference a specific section of another file, like
this: `[[main.py#highlighting-the-source-code]]` which renders as
[[main.py#highlighting-the-source-code]]. Sections have to be manually
declared; they are written on a single line, and surrounded by equals signs:
`=== like this ===`
"""
if not outdir:
raise TypeError("Missing the required 'outdir' keyword argument.")
def sanitize_section_name(name):
return "-".join(name.lower().strip().split(" "))
def replace_crossref(match):
# Check if the match contains an anchor
if '#' in match.group(1):
name, anchor = match.group(1).split('#')
return " [%s](%s#%s)" % (name,
path.basename(destination(name,
preserve_paths=preserve_paths,
outdir=outdir)),
anchor)
else:
return " [%s](%s)" % (match.group(1),
path.basename(destination(match.group(1),
preserve_paths=preserve_paths,
outdir=outdir)))
def replace_section_name(match):
return '%(lvl)s <span id="%(id)s" href="%(id)s">%(name)s</span>' % {
"lvl" : re.sub('=', '#', match.group(1)),
"id" : sanitize_section_name(match.group(2)),
"name" : match.group(2)
}
comment = re.sub('^([=]+)([^=]+)[=]*\s*$', replace_section_name, comment)
comment = re.sub('[^`]\[\[(.+?)\]\]', replace_crossref, comment)
return comment
# === Highlighting the source code ===
def highlight(source, sections, language, preserve_paths=True, outdir=None):
"""
Highlights a single chunk of code using the **Pygments** module, and runs
the text of its corresponding comment through **Markdown**.
We process the entire file in a single call to Pygments by inserting little
marker comments between each section and then splitting the result string
wherever our markers occur.
"""
if not outdir:
raise TypeError("Missing the required 'outdir' keyword argument.")
output = pygments.highlight(language["divider_text"].join(section["code_text"].rstrip() for section in sections),
language["lexer"],
formatters.get_formatter_by_name("html"))
output = output.replace(highlight_start, "").replace(highlight_end, "")
fragments = re.split(language["divider_html"], output)
for i, section in enumerate(sections):
section["code_html"] = highlight_start + shift(fragments, "") + highlight_end
try:
docs_text = unicode(section["docs_text"])
except UnicodeError:
docs_text = unicode(section["docs_text"].decode('utf-8'))
section["docs_html"] = markdown(preprocess(docs_text,
i,
preserve_paths=preserve_paths,
outdir=outdir))
section["num"] = i
# === HTML Code generation ===
def generate_html(source, sections, preserve_paths=True, outdir=None):
"""
Once all of the code is finished highlighting, we can generate the HTML file
and write out the documentation. Pass the completed sections into the
template found in `resources/pycco.html`.
Pystache will attempt to recursively render context variables, so we must
    replace any occurrences of `{{`, which is valid in some languages, with a
"unique enough" identifier before rendering, and then post-process the
rendered template and change the identifier back to `{{`.
"""
if not outdir:
raise TypeError("Missing the required 'outdir' keyword argument")
title = path.basename(source)
dest = destination(source, preserve_paths=preserve_paths, outdir=outdir)
csspath = path.relpath(path.join(outdir, "pycco.css"), path.split(dest)[0])
for sect in sections:
sect["code_html"] = re.sub(r"\{\{", r"__DOUBLE_OPEN_STACHE__", sect["code_html"])
rendered = pycco_template({
"title" : title,
"stylesheet" : csspath,
"sections" : sections,
"source" : source,
"path" : path,
"destination" : destination
})
return re.sub(r"__DOUBLE_OPEN_STACHE__", "{{", rendered).encode("utf-8")
# === Helpers & Setup ===
# This module contains all of our static resources.
import pycco_resources
# Import our external dependencies.
import optparse
import os
import pygments
import pystache
import re
import sys
import time
from markdown import markdown
from os import path
from pygments import lexers, formatters
# A list of the languages that Pycco supports, mapping the file extension to
# the name of the Pygments lexer and the symbol that indicates a comment. To
# add another language to Pycco's repertoire, add it here.
languages = {
".coffee": { "name": "coffee-script", "symbol": "#",
"multistart": '###', "multiend": '###' },
".pl": { "name": "perl", "symbol": "#" },
".sql": { "name": "sql", "symbol": "--" },
".c": { "name": "c", "symbol": "//",
"multistart": "/*", "multiend": "*/"},
".cpp": { "name": "cpp", "symbol": "//"},
".js": { "name": "javascript", "symbol": "//",
"multistart": "/*", "multiend": "*/"},
".rb": { "name": "ruby", "symbol": "#",
"multistart": "=begin", "multiend": "=end"},
".py": { "name": "python", "symbol": "#",
"multistart": '"""', "multiend": '"""' },
".scm": { "name": "scheme", "symbol": ";;",
"multistart": "#|", "multiend": "|#"},
".lua": { "name": "lua", "symbol": "--",
"multistart": "--[[", "multiend": "--]]"},
".erl": { "name": "erlang", "symbol": "%%" },
".tcl": { "name": "tcl", "symbol": "#" },
".hs": { "name": "haskell", "symbol": "--",
"multistart": "{-", "multiend": "-}"},
".feature": { "name": "gherkin", "symbol": "#" },
}
# Build out the appropriate matchers and delimiters for each language.
for ext, l in languages.items():
# Does the line begin with a comment?
l["comment_matcher"] = re.compile(r"^\s*" + l["symbol"] + "\s?")
# The dividing token we feed into Pygments, to delimit the boundaries between
# sections.
l["divider_text"] = "\n" + l["symbol"] + "DIVIDER\n"
# The mirror of `divider_text` that we expect Pygments to return. We can split
# on this to recover the original sections.
l["divider_html"] = re.compile(r'\n*<span class="c[1]?">' + l["symbol"] + 'DIVIDER</span>\n*')
# Get the Pygments Lexer for this language.
l["lexer"] = lexers.get_lexer_by_name(l["name"])
def get_language(source, code, language=None):
"""Get the current language we're documenting, based on the extension."""
if language is not None:
for l in languages.values():
if l["name"] == language:
return l
else:
raise ValueError("Unknown forced language: " + language)
m = re.match(r'.*(\..+)', os.path.basename(source))
if m and m.group(1) in languages:
return languages[m.group(1)]
else:
lang = lexers.guess_lexer(code).name.lower()
for l in languages.values():
if l["name"] == lang:
return l
else:
raise ValueError("Can't figure out the language!")
def destination(filepath, preserve_paths=True, outdir=None):
"""
Compute the destination HTML path for an input source file path. If the
source is `lib/example.py`, the HTML will be at `docs/example.html`
"""
dirname, filename = path.split(filepath)
if not outdir:
raise TypeError("Missing the required 'outdir' keyword argument.")
try:
name = re.sub(r"\.[^.]*$", "", filename)
except ValueError:
name = filename
if preserve_paths:
name = path.join(dirname, name)
return path.join(outdir, "%s.html" % name)
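# Illustrative examples (not part of the original Pycco source) of the mapping
# computed by `destination` above:
#     destination("lib/example.py", preserve_paths=False, outdir="docs")
#         -> "docs/example.html"
#     destination("lib/example.py", preserve_paths=True, outdir="docs")
#         -> "docs/lib/example.html"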
def shift(list, default):
"""
Shift items off the front of the `list` until it is empty, then return
`default`.
"""
try:
return list.pop(0)
except IndexError:
return default
def ensure_directory(directory):
"""Ensure that the destination directory exists."""
if not os.path.isdir(directory):
os.makedirs(directory)
def template(source):
return lambda context: pystache.render(source, context)
# Create the template that we will use to generate the Pycco HTML page.
pycco_template = template(pycco_resources.html)
# The CSS styles we'd like to apply to the documentation.
pycco_styles = pycco_resources.css
# The start of each Pygments highlight block.
highlight_start = "<div class=\"highlight\"><pre>"
# The end of each Pygments highlight block.
highlight_end = "</pre></div>"
def process(sources, preserve_paths=True, outdir=None, language=None):
"""For each source file passed as argument, generate the documentation."""
if not outdir:
raise TypeError("Missing the required 'outdir' keyword argument.")
# Make a copy of sources given on the command line. `main()` needs the
# original list when monitoring for changed files.
sources = sorted(sources)
# Proceed to generating the documentation.
if sources:
ensure_directory(outdir)
css = open(path.join(outdir, "pycco.css"), "w")
css.write(pycco_styles)
css.close()
def next_file():
s = sources.pop(0)
dest = destination(s, preserve_paths=preserve_paths, outdir=outdir)
try:
os.makedirs(path.split(dest)[0])
except OSError:
pass
with open(dest, "w") as f:
f.write(generate_documentation(s, preserve_paths=preserve_paths, outdir=outdir,
language=language))
print "pycco = %s -> %s" % (s, dest)
if sources:
next_file()
next_file()
__all__ = ("process", "generate_documentation")
def monitor(sources, opts):
"""Monitor each source file and re-generate documentation on change."""
# The watchdog modules are imported in `main()` but we need to re-import
# here to bring them into the local namespace.
import watchdog.events
import watchdog.observers
# Watchdog operates on absolute paths, so map those to original paths
# as specified on the command line.
absolute_sources = dict((os.path.abspath(source), source)
for source in sources)
class RegenerateHandler(watchdog.events.FileSystemEventHandler):
"""A handler for recompiling files which triggered watchdog events"""
def on_modified(self, event):
"""Regenerate documentation for a file which triggered an event"""
# Re-generate documentation from a source file if it was listed on
# the command line. Watchdog monitors whole directories, so other
# files may cause notifications as well.
if event.src_path in absolute_sources:
process([absolute_sources[event.src_path]],
outdir=opts.outdir,
preserve_paths=opts.paths)
# Set up an observer which monitors all directories for files given on
# the command line and notifies the handler defined above.
event_handler = RegenerateHandler()
observer = watchdog.observers.Observer()
directories = set(os.path.split(source)[0] for source in sources)
for directory in directories:
observer.schedule(event_handler, path=directory)
# Run the file change monitoring loop until the user hits Ctrl-C.
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def main():
"""Hook spot for the console script."""
parser = optparse.OptionParser()
parser.add_option('-p', '--paths', action='store_true',
help='Preserve path structure of original files')
parser.add_option('-d', '--directory', action='store', type='string',
dest='outdir', default='docs',
help='The output directory that the rendered files should go to.')
parser.add_option('-w', '--watch', action='store_true',
help='Watch original files and re-generate documentation on changes')
parser.add_option('-l', '--force-language', action='store', type='string',
dest='language', default=None,
help='Force the language for the given files')
opts, sources = parser.parse_args()
process(sources, outdir=opts.outdir, preserve_paths=opts.paths,
language=opts.language)
# If the -w / --watch option was present, monitor the source directories
# for changes and re-generate documentation for source files whenever they
# are modified.
if opts.watch:
try:
import watchdog.events
import watchdog.observers
except ImportError:
sys.exit('The -w/--watch option requires the watchdog package.')
monitor(sources, opts)
# Run the script.
if __name__ == "__main__":
main()
|
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Fast neuron IO module'''
import logging
from collections import defaultdict, namedtuple
import numpy as np
from neurom.core.dataformat import COLS, POINT_TYPE, ROOT_ID
L = logging.getLogger(__name__)
TYPE, ID, PID = 0, 1, 2
class DataWrapper(object):
'''Class holding a raw data block and section information'''
def __init__(self, data_block, fmt, sections=None):
'''Section Data Wrapper
data_block is np.array-like with the following columns:
[X, Y, Z, R, TYPE, ID, P]
X(float): x-coordinate
Y(float): y-coordinate
Z(float): z-coordinate
R(float): radius
TYPE(integer): one of the types described by POINT_TYPE
ID(integer): unique integer given to each point, the `ROOT_ID` is -1
P(integer): the ID of the parent
Args:
data_block: as defined above
fmt: File format designation, eg: SWC
sections: Already extracted sections, otherwise data_block will be used
Notes:
- there is no ordering constraint: a child can reference a parent ID that comes
later in the block
- there is no requirement that the IDs are dense
- there is no upper bound on the number of rows with the same 'P'arent: in other
words, multifurcations are allowed
'''
self.data_block = data_block
self.fmt = fmt
# list of DataBlockSection
self.sections = sections if sections is not None else _extract_sections(data_block)
def neurite_root_section_ids(self):
'''Get the section IDs of the initial neurite sections'''
sec = self.sections
return [i for i, ss in enumerate(sec)
if ss.pid > -1 and (sec[ss.pid].ntype == POINT_TYPE.SOMA and
ss.ntype != POINT_TYPE.SOMA)]
def soma_points(self):
'''Get the soma points'''
db = self.data_block
return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]
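# Minimal usage sketch (hedged: not part of NeuroM; the coordinates, radii and
# the POINT_TYPE.AXON constant below are illustrative assumptions only). It
# builds a two-point block matching the documented [X, Y, Z, R, TYPE, ID, P]
# layout and wraps it in a DataWrapper.
def _example_datawrapper():
    '''Return a DataWrapper around a tiny hand-written SWC-style block.'''
    block = np.array([
        # X    Y    Z    R    TYPE              ID   P
        [0.0, 0.0, 0.0, 1.0, POINT_TYPE.SOMA,   0., -1.],
        [0.0, 0.0, 1.0, 0.5, POINT_TYPE.AXON,   1.,  0.],
    ])
    return DataWrapper(block, 'SWC')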
def _merge_sections(sec_a, sec_b):
'''Merge two sections
Merges sec_a into sec_b and sets sec_a attributes to default
'''
sec_b.ids = list(sec_a.ids) + list(sec_b.ids[1:])
sec_b.ntype = sec_a.ntype
sec_b.pid = sec_a.pid
sec_a.ids = []
sec_a.pid = -1
sec_a.ntype = 0
def _section_end_points(structure_block, id_map):
'''Get the section end-points'''
soma_idx = structure_block[:, TYPE] == POINT_TYPE.SOMA
soma_ids = structure_block[soma_idx, ID]
neurite_idx = structure_block[:, TYPE] != POINT_TYPE.SOMA
neurite_rows = structure_block[neurite_idx, :]
soma_end_pts = set(id_map[id_]
for id_ in soma_ids[np.in1d(soma_ids, neurite_rows[:, PID])])
# end points have either no children or more than one
# ie: leaf or multifurcation nodes
n_children = defaultdict(int)
for row in structure_block:
n_children[row[PID]] += 1
end_pts = set(i for i, row in enumerate(structure_block)
if n_children[row[ID]] != 1)
return end_pts.union(soma_end_pts)
class DataBlockSection(object):
'''sections ((ids), type, parent_id)'''
def __init__(self, ids=None, ntype=0, pid=-1):
self.ids = [] if ids is None else ids
self.ntype = ntype
self.pid = pid
def __eq__(self, other):
return (self.ids == other.ids and
self.ntype == other.ntype and
self.pid == other.pid)
def __str__(self):
return ('%s: ntype=%s, pid=%s: n_ids=%d' %
(self.__class__, self.ntype, self.pid, len(self.ids)))
__repr__ = __str__
def _extract_sections(data_block):
'''Make a list of sections from an SWC-style data wrapper block'''
structure_block = data_block[:, COLS.TYPE:COLS.COL_COUNT].astype(np.int)
# SWC ID -> structure_block position
id_map = {-1: -1}
for i, row in enumerate(structure_block):
id_map[row[ID]] = i
# end points have either no children, more than one, or are the start
# of a new gap
sec_end_pts = _section_end_points(structure_block, id_map)
# a 'gap' is when part of a section's segments are interleaved
# with those of another section
gap_sections = set()
sections = []
def new_section():
'''new_section'''
sections.append(DataBlockSection())
return sections[-1]
curr_section = new_section()
parent_section = {-1: -1}
for row in structure_block:
row_id = id_map[row[ID]]
parent_id = id_map[row[PID]]
if not curr_section.ids:
# first in section point is parent
curr_section.ids.append(parent_id)
curr_section.ntype = row[TYPE]
gap = parent_id != curr_section.ids[-1]
# If parent is not the previous point, create a section end-point.
# Else add the point to this section
if gap:
sec_end_pts.add(row_id)
else:
curr_section.ids.append(row_id)
if row_id in sec_end_pts:
parent_section[curr_section.ids[-1]] = len(sections) - 1
# Parent-child discontinuity section
if gap:
curr_section = new_section()
curr_section.ids.extend((parent_id, row_id))
curr_section.ntype = row[TYPE]
gap_sections.add(len(sections) - 2)
elif row_id != len(data_block) - 1:
# avoid creating an extra DataBlockSection for last row if it's a leaf
curr_section = new_section()
for sec in sections:
# get the section parent ID from the id of the first point.
if sec.ids:
sec.pid = parent_section[sec.ids[0]]
# join gap sections and "disable" first half
if sec.pid in gap_sections:
_merge_sections(sections[sec.pid], sec)
# TODO find a way to remove empty sections. Currently they are
# required to maintain tree integrity.
return sections
class BlockNeuronBuilder(object):
'''Helper to create DataWrapper for 'block' sections
This helps create a new DataWrapper when one already has 'blocks'
(ie: contiguous points, forming all the segments) of a section, and they
just need to connect them together based on their parent.
Example:
>>> builder = BlockNeuronBuilder()
>>> builder.add_section(segment_id, parent_id, segment_type, points)
...
>>> morph = builder.get_datawrapper()
Note:
This will re-number the IDs if they are not 'dense' (ie: have gaps)
'''
BlockSection = namedtuple('BlockSection', 'parent_id section_type points')
def __init__(self):
self.sections = {}
def add_section(self, id_, parent_id, section_type, points):
'''add a section
Args:
id_(int): identifying number of the section
parent_id(int): identifying number of the parent of this section
section_type(int): the section type as defined by POINT_TYPE
points(array): the section's points, one [X, Y, Z, R] row per point
'''
# L.debug('Adding section %d, with parent %d, of type: %d with count: %d',
# id_, parent_id, section_type, len(points))
assert id_ not in self.sections, 'id %s already exists in sections' % id_
self.sections[id_] = BlockNeuronBuilder.BlockSection(parent_id, section_type, points)
def _make_datablock(self):
'''Make a data_block and sections list as required by DataWrapper'''
section_ids = sorted(self.sections)
# create all insertion id's, this needs to be done ahead of time
# as some of the children may have a lower id than their parents
id_to_insert_id = {}
row_count = 0
for section_id in section_ids:
row_count += len(self.sections[section_id].points)
id_to_insert_id[section_id] = row_count - 1
datablock = np.empty((row_count, COLS.COL_COUNT), dtype=np.float)
datablock[:, COLS.ID] = np.arange(len(datablock))
datablock[:, COLS.P] = datablock[:, COLS.ID] - 1
sections = []
insert_index = 0
for id_ in section_ids:
sec = self.sections[id_]
points, section_type, parent_id = sec.points, sec.section_type, sec.parent_id
idx = slice(insert_index, insert_index + len(points))
datablock[idx, COLS.XYZR] = points
datablock[idx, COLS.TYPE] = section_type
datablock[idx.start, COLS.P] = id_to_insert_id.get(parent_id, ROOT_ID)
sections.append(DataBlockSection(idx, section_type, parent_id))
insert_index = idx.stop
return datablock, sections
def _check_consistency(self):
'''see if the sections have obvious errors'''
type_count = defaultdict(int)
for _, section in sorted(self.sections.items()):
type_count[section.section_type] += 1
if type_count[POINT_TYPE.SOMA] != 1:
L.info('Have %d somas, expected 1', type_count[POINT_TYPE.SOMA])
def get_datawrapper(self, file_format='BlockNeuronBuilder', data_wrapper=DataWrapper):
'''returns a DataWrapper'''
self._check_consistency()
datablock, sections = self._make_datablock()
return data_wrapper(datablock, file_format, sections)
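# Concrete sketch of the docstring example above (hedged: the point values and
# POINT_TYPE constants used here are illustrative assumptions, not NeuroM data).
def _example_block_builder():
    '''Build a soma plus a one-segment neurite and return the DataWrapper.'''
    builder = BlockNeuronBuilder()
    soma_points = np.array([[0.0, 0.0, 0.0, 1.0]])            # [X, Y, Z, R]
    axon_points = np.array([[0.0, 0.0, 1.0, 0.5],
                            [0.0, 0.0, 2.0, 0.5]])
    builder.add_section(0, ROOT_ID, POINT_TYPE.SOMA, soma_points)
    builder.add_section(1, 0, POINT_TYPE.AXON, axon_points)
    return builder.get_datawrapper()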
|
|
import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
start_time = time.time()
MAX_TEST = 10
for test_num in range(1, MAX_TEST+1):
# Hyperparameters
RANDOM_NUMBER_SEED = test_num
ENVIRONMENT1 = "morph-v0"
MAX_EPISODES = 40000 # number of episodes
EPISODE_LENGTH = 6000 # single episode length
HIDDEN_SIZE = 16
DISPLAY_WEIGHTS = False # Help debug weight update
gamma = 0.99 # Discount per step
RENDER = False # Render the cart-pole system
VIDEO_INTERVAL = 100 # Generate a video at this interval
CONSECUTIVE_TARGET = 100 # Including previous 100 rewards
CONST_LR = True # Constant or decaying learning rate
# Constant learning rate
const_learning_rate_in = 0.0008
# Decay learning rate
start_learning_rate_in = 0.003
decay_steps_in = 100
decay_rate_in = 0.96
DIR_PATH_SAVEFIG = "/root/cartpole_plot/"
if CONST_LR:
learning_rate = const_learning_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_clr" + str(learning_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
else:
start_learning_rate = start_learning_rate_in
decay_steps = decay_steps_in
decay_rate = decay_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_dlr_slr" + str(start_learning_rate).replace(".", "p") \
+ "_ds" + str(decay_steps) \
+ "_dr" + str(decay_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
env = gym.make(ENVIRONMENT1)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
# Input and output sizes
input_size = 4
output_size = 2
# input_size = env.observation_space.shape[0]
# try:
# output_size = env.action_space.shape[0]
# except AttributeError:
# output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
if not CONST_LR:
# decay learning rate
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, decay_steps, decay_rate, staircase=False)
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
# Xavier (2010) weights initializer for uniform distribution:
# x = sqrt(6. / (in + out)); [-x, x]
w_init = tf.contrib.layers.xavier_initializer()
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
if CONST_LR:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
else:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi, global_step=global_step)
# saver = tf.train.Saver()
# Create and initialize a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, ep, render=False):
raw_reward = 0
discounted_reward = 0
cumulative_reward = []
discount = 1.0
states = []
actions = []
obs = environment.reset()
done = False
while not done:
states.append(obs)
cumulative_reward.append(discounted_reward)
if render and ((ep % VIDEO_INTERVAL) == 0):
environment.render()
action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
actions.append(action)
obs, reward, done, info = env.step(action[0])
raw_reward += reward
if reward > 0:
discounted_reward += reward * discount
else:
discounted_reward += reward
discount *= gamma
return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
returns = []
mean_returns = []
for ep in range(MAX_EPISODES):
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
returns.append(raw_G)
running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_return = np.mean(running_returns)
mean_returns.append(mean_return)
if CONST_LR:
msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), learning_rate, raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
else:
msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), sess.run(learning_rate), raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
env.close() # close openai gym environment
tf.reset_default_graph() # clear tensorflow graph
# Plot
# plt.style.use('ggplot')
plt.style.use('dark_background')
episodes_plot = np.arange(MAX_EPISODES)
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
if CONST_LR:
ax.set_title("The Cart-Pole Problem Test %i \n \
Episode Length: %i \
Discount Factor: %.2f \n \
Number of Hidden Neuron: %i \
Constant Learning Rate: %.5f" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, learning_rate))
else:
ax.set_title("The Cart-Pole Problem Test %i \n \
EpisodeLength: %i DiscountFactor: %.2f NumHiddenNeuron: %i \n \
Decay Learning Rate: (start: %.5f, steps: %i, rate: %.2f)" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, start_learning_rate, decay_steps, decay_rate))
ax.set_xlabel("Episode")
ax.set_ylabel("Return")
ax.set_ylim((0, EPISODE_LENGTH))
ax.grid(linestyle='--')
ax.plot(episodes_plot, returns, label='Instant return')
ax.plot(episodes_plot, mean_returns, label='Averaged return')
legend = ax.legend(loc='best', shadow=True)
fig.savefig(DIR_PATH_SAVEFIG + file_name_savefig, dpi=500)
# plt.show()
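# Hedged sketch (not part of the training script above): the per-step weight fed
# into `expected_returns` is the discounted return of the whole episode minus the
# discounted return accumulated before that step, i.e. the discounted
# reward-to-go. The helper below mirrors that computation for all-positive
# rewards, which is the usual case for this cart-pole task.
def _example_reward_to_go(rewards, discount_factor=0.99):
    '''Illustrative only; mirrors the discounted_G - cumulative_G weighting.'''
    discounted_total = 0.0
    discount = 1.0
    cumulative = []
    for r in rewards:
        cumulative.append(discounted_total)
        discounted_total += r * discount
        discount *= discount_factor
    return [discounted_total - c for c in cumulative]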
|
|
#!/usr/bin/env python
# Copyright (c) 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reads lines from files or stdin and identifies C++ tests.
Outputs a filter that can be used with --gtest_filter or a filter file to
run only the tests identified.
Usage:
Outputs filter for all test fixtures in a directory. --class-only avoids an
overly long filter string.
$ cat components/mycomp/**test.cc | make_gtest_filter.py --class-only
Outputs filter for all tests in a file.
$ make_gtest_filter.py ./myfile_unittest.cc
Outputs filter for only the test at line 123.
$ make_gtest_filter.py --line=123 ./myfile_unittest.cc
Formats output as a GTest filter file.
$ make_gtest_filter.py ./myfile_unittest.cc --as-filter-file
Use a JSON failure summary as the input.
$ make_gtest_filter.py summary.json --from-failure-summary
Elide the filter list using wildcards when possible.
$ make_gtest_filter.py summary.json --from-failure-summary --wildcard-compress
"""
from __future__ import print_function
import argparse
import collections
import fileinput
import json
import re
import sys
class TrieNode:
def __init__(self):
# The number of strings which terminated on or underneath this node.
self.num_strings = 0
# The prefix subtries which follow |this|, keyed by their next character.
self.children = {}
def PascalCaseSplit(input_string):
current_term = []
prev_char = ''
for current_char in input_string:
is_boundary = prev_char != '' and \
((current_char.isupper() and prev_char.islower()) or \
(current_char.isalpha() != prev_char.isalpha()) or \
(current_char.isalnum() != prev_char.isalnum()))
prev_char = current_char
if is_boundary:
yield ''.join(current_term)
current_term = []
current_term.append(current_char)
if len(current_term) > 0:
yield ''.join(current_term)
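# Behaviour sketch (hedged, for orientation only):
#   list(PascalCaseSplit('TestOneTwo'))  ->  ['Test', 'One', 'Two']
#   list(PascalCaseSplit('Test1'))       ->  ['Test', '1']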
def TrieInsert(trie, value):
"""Inserts the characters of 'value' into a trie, with every edge representing
a single character. An empty child set indicates end-of-string."""
for term in PascalCaseSplit(value):
trie.num_strings = trie.num_strings + 1
if term in trie.children:
trie = trie.children[term]
else:
subtrie = TrieNode()
trie.children[term] = subtrie
trie = subtrie
trie.num_strings = trie.num_strings + 1
def ComputeWildcardsFromTrie(trie, min_depth, min_cases):
"""Computes a list of wildcarded test case names from a trie using a depth
first traversal."""
WILDCARD = '*'
# Stack of values to process, initialized with the root node.
# The first item of the tuple is the substring represented by the traversal so
# far.
# The second item of the tuple is the TrieNode itself.
# The third item is the depth of the traversal so far.
to_process = [('', trie, 0)]
while len(to_process) > 0:
cur_prefix, cur_trie, cur_depth = to_process.pop()
assert (cur_trie.num_strings != 0)
if len(cur_trie.children) == 0:
# No more children == we're at the end of a string.
yield cur_prefix
elif (cur_depth == min_depth) and \
cur_trie.num_strings > min_cases:
# Trim traversal of this path if the path is deep enough and there
# are enough entries to warrant elision.
yield cur_prefix + WILDCARD
else:
# Traverse all children of this node.
for term, subtrie in cur_trie.children.items():
to_process.append((cur_prefix + term, subtrie, cur_depth + 1))
def CompressWithWildcards(test_list, min_depth, min_cases):
"""Given a list of SUITE.CASE names, generates an exclusion list using
wildcards to reduce redundancy.
For example:
Foo.TestOne
Foo.TestTwo
becomes:
Foo.Test*"""
suite_tries = {}
# First build up a trie based representations of all test case names,
# partitioned per-suite.
for case in test_list:
suite_name, test = case.split('.')
if not suite_name in suite_tries:
suite_tries[suite_name] = TrieNode()
TrieInsert(suite_tries[suite_name], test)
output = []
# Go through the suites' tries and generate wildcarded representations
# of the cases.
for suite in suite_tries.items():
suite_name, cases_trie = suite
for case_wildcard in ComputeWildcardsFromTrie(cases_trie, min_depth, \
min_cases):
output.append("{}.{}".format(suite_name, case_wildcard))
output.sort()
return output
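# Worked sketch (hedged): with the defaults wired up in main() below
# (--wildcard-min-depth=1, --wildcard-min-cases=3), four similarly named cases
# in one suite fold into a single wildcard:
#   CompressWithWildcards(
#       ['Foo.TestOne', 'Foo.TestTwo', 'Foo.TestThree', 'Foo.TestFour'], 1, 3)
#   ->  ['Foo.Test*']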
def GetFailedTestsFromTestLauncherSummary(summary):
failures = set()
for iteration in summary['per_iteration_data']:
for case_name, results in iteration.items():
for result in results:
if result['status'] == 'FAILURE':
failures.add(case_name)
return list(failures)
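# Shape of the summary this expects (hedged sketch of the test-launcher JSON,
# trimmed to the fields actually read above):
#   {"per_iteration_data": [
#       {"Suite.Case":  [{"status": "FAILURE"}],
#        "Suite.Other": [{"status": "SUCCESS"}]}]}
# would yield ['Suite.Case'].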
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--input-format',
choices=['swarming_summary', 'test_launcher_summary', 'test_file'],
default='test_file')
parser.add_argument('--output-format',
choices=['file', 'args'],
default='args')
parser.add_argument('--wildcard-compress', action='store_true')
parser.add_argument(
'--wildcard-min-depth',
default=1,
help="Minimum number of terms in a case before a wildcard may be " +
"used, so that prefixes are not excessively broad.")
parser.add_argument(
'--wildcard-min-cases',
default=3,
help="Minimum number of cases in a filter before folding into a " +
"wildcard, so as to not create wildcards needlessly for small "
"numbers of similarly named test failures.")
parser.add_argument('--line', type=int)
parser.add_argument('--class-only', action='store_true')
parser.add_argument(
'--as-exclusions',
action='store_true',
help='Generate exclusion rules for test cases, instead of inclusions.')
args, left = parser.parse_known_args()
test_filters = []
if args.input_format == 'swarming_summary':
# Decode the JSON files separately and combine their contents.
test_filters = []
for json_file in left:
test_filters.extend(json.loads('\n'.join(open(json_file, 'r'))))
if args.wildcard_compress:
test_filters = CompressWithWildcards(test_filters,
args.wildcard_min_depth,
args.wildcard_min_cases)
elif args.input_format == 'test_launcher_summary':
# Decode the JSON files separately and combine their contents.
test_filters = []
for json_file in left:
test_filters.extend(
GetFailedTestsFromTestLauncherSummary(
json.loads('\n'.join(open(json_file, 'r')))))
if args.wildcard_compress:
test_filters = CompressWithWildcards(test_filters,
args.wildcard_min_depth,
args.wildcard_min_cases)
else:
file_input = fileinput.input(left)
if args.line:
# If --line is used, restrict text to a few lines around the requested
# line.
requested_line = args.line
selected_lines = []
for line in file_input:
if (fileinput.lineno() >= requested_line
and fileinput.lineno() <= requested_line + 1):
selected_lines.append(line)
txt = ''.join(selected_lines)
else:
txt = ''.join(list(file_input))
# This regex is not exhaustive, and should be updated as needed.
rx = re.compile(
r'^(?:TYPED_)?(?:IN_PROC_BROWSER_)?TEST(_F|_P)?\(\s*(\w+)\s*' + \
r',\s*(\w+)\s*\)',
flags=re.DOTALL | re.M)
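# For example (hedged), 'IN_PROC_BROWSER_TEST_F(FooTest, DoesBar)' matches with
# group(2) == 'FooTest' and group(3) == 'DoesBar', giving 'FooTest.DoesBar'.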
tests = []
for m in rx.finditer(txt):
tests.append(m.group(2) + '.' + m.group(3))
# Note: Test names have the following structures:
# * FixtureName.TestName
# * InstantiationName/FixtureName.TestName/##
# Since this script doesn't parse instantiations, we generate filters to
# match either regular tests or instantiated tests.
if args.wildcard_compress:
test_filters = CompressWithWildcards(tests, args.wildcard_min_depth,
args.wildcard_min_cases)
elif args.class_only:
fixtures = set([t.split('.')[0] for t in tests])
test_filters = [c + '.*' for c in fixtures] + \
['*/' + c + '.*/*' for c in fixtures]
else:
test_filters = tests + ['*/' + c + '/*' for c in tests]
if args.as_exclusions:
test_filters = ['-' + x for x in test_filters]
if args.output_format == 'file':
print('\n'.join(test_filters))
else:
print(':'.join(test_filters))
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
NaT,
Period,
PeriodIndex,
Series,
date_range,
offsets,
period_range,
)
import pandas._testing as tm
from ..datetimelike import DatetimeLike
class TestPeriodIndex(DatetimeLike):
_holder = PeriodIndex
@pytest.fixture(
params=[
tm.makePeriodIndex(10),
period_range("20130101", periods=10, freq="D")[::-1],
],
ids=["index_inc", "index_dec"],
)
def index(self, request):
return request.param
def create_index(self) -> PeriodIndex:
return period_range("20130101", periods=5, freq="D")
def test_pickle_compat_construction(self):
pass
@pytest.mark.parametrize("freq", ["D", "M", "A"])
def test_pickle_round_trip(self, freq):
idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq=freq)
result = tm.round_trip_pickle(idx)
tm.assert_index_equal(result, idx)
def test_where(self):
# This is handled in test_indexing
pass
@pytest.mark.parametrize("use_numpy", [True, False])
@pytest.mark.parametrize(
"index",
[
period_range("2000-01-01", periods=3, freq="D"),
period_range("2001-01-01", periods=3, freq="2D"),
PeriodIndex(["2001-01", "NaT", "2003-01"], freq="M"),
],
)
def test_repeat_freqstr(self, index, use_numpy):
# GH10183
expected = PeriodIndex([p for p in index for _ in range(3)])
result = np.repeat(index, 3) if use_numpy else index.repeat(3)
tm.assert_index_equal(result, expected)
assert result.freqstr == index.freqstr
def test_no_millisecond_field(self):
msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex.millisecond
msg = "'DatetimeIndex' object has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex([]).millisecond
def test_make_time_series(self):
index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
series = Series(1, index=index)
assert isinstance(series, Series)
def test_shallow_copy_empty(self):
# GH13067
idx = PeriodIndex([], freq="M")
result = idx._shallow_copy()
expected = idx
tm.assert_index_equal(result, expected)
def test_shallow_copy_disallow_i8(self):
# GH-24391
pi = period_range("2018-01-01", periods=3, freq="2D")
with pytest.raises(AssertionError, match="ndarray"):
pi._shallow_copy(pi.asi8)
def test_shallow_copy_requires_disallow_period_index(self):
pi = period_range("2018-01-01", periods=3, freq="2D")
with pytest.raises(AssertionError, match="PeriodIndex"):
pi._shallow_copy(pi)
def test_view_asi8(self):
idx = PeriodIndex([], freq="M")
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view("i8"), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
idx = PeriodIndex(["2011-01", NaT], freq="M")
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view("i8"), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
idx = PeriodIndex(["2011-01-01", NaT], freq="D")
tm.assert_numpy_array_equal(idx.view("i8"), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
def test_values(self):
idx = PeriodIndex([], freq="M")
exp = np.array([], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.to_numpy(), exp)
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx.asi8, exp)
idx = PeriodIndex(["2011-01", NaT], freq="M")
exp = np.array([Period("2011-01", freq="M"), NaT], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.to_numpy(), exp)
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx.asi8, exp)
idx = PeriodIndex(["2011-01-01", NaT], freq="D")
exp = np.array([Period("2011-01-01", freq="D"), NaT], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.to_numpy(), exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx.asi8, exp)
def test_period_index_length(self):
pi = period_range(freq="A", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
pi = period_range(freq="Q", start="1/1/2001", end="12/1/2009")
assert len(pi) == 4 * 9
pi = period_range(freq="M", start="1/1/2001", end="12/1/2009")
assert len(pi) == 12 * 9
start = Period("02-Apr-2005", "B")
i1 = period_range(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period("2006-12-31", "W")
i1 = period_range(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period("2006-12-31", "1w")
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
period_range(start=start, end=end_intv)
end_intv = Period("2005-05-01", "B")
i1 = period_range(start=start, end=end_intv)
msg = (
"Of the three parameters: start, end, and periods, exactly two "
"must be specified"
)
with pytest.raises(ValueError, match=msg):
period_range(start=start)
# infer freq from first element
i2 = PeriodIndex([end_intv, Period("2005-05-05", "B")])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period("2005-05-05", "B")]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period("2006-12-31", "w")]
msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)"
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(vals)
vals = np.array(vals)
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = period_range(freq="A", start="1/1/2001", end="12/1/2005")
self._check_all_fields(pi)
pi = period_range(freq="Q", start="1/1/2001", end="12/1/2002")
self._check_all_fields(pi)
pi = period_range(freq="M", start="1/1/2001", end="1/1/2002")
self._check_all_fields(pi)
pi = period_range(freq="D", start="12/1/2001", end="6/1/2001")
self._check_all_fields(pi)
pi = period_range(freq="B", start="12/1/2001", end="6/1/2001")
self._check_all_fields(pi)
pi = period_range(freq="H", start="12/31/2001", end="1/1/2002 23:00")
self._check_all_fields(pi)
pi = period_range(freq="Min", start="12/31/2001", end="1/1/2002 00:20")
self._check_all_fields(pi)
pi = period_range(
freq="S", start="12/31/2001 00:00:00", end="12/31/2001 00:05:00"
)
self._check_all_fields(pi)
end_intv = Period("2006-12-31", "W")
i1 = period_range(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"week",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"qyear",
"days_in_month",
]
periods = list(periodindex)
s = Series(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert len(periodindex) == len(field_idx)
for x, val in zip(periods, field_idx):
assert getattr(x, field) == val
if len(s) == 0:
continue
field_s = getattr(s.dt, field)
assert len(periodindex) == len(field_s)
for x, val in zip(periods, field_s):
assert getattr(x, field) == val
def test_is_(self):
create_index = lambda: period_range(freq="A", start="1/1/2001", end="12/1/2009")
index = create_index()
assert index.is_(index)
assert not index.is_(create_index())
assert index.is_(index.view())
assert index.is_(index.view().view().view().view().view())
assert index.view().is_(index)
ind2 = index.view()
index.name = "Apple"
assert ind2.is_(index)
assert not index.is_(index[:])
assert not index.is_(index.asfreq("M"))
assert not index.is_(index.asfreq("A"))
assert not index.is_(index - 2)
assert not index.is_(index - 0)
def test_periods_number_check(self):
msg = (
"Of the three parameters: start, end, and periods, exactly two "
"must be specified"
)
with pytest.raises(ValueError, match=msg):
period_range("2011-1-1", "2012-1-1", "B")
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN")
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts["2007"]
expected = ts[1:3]
tm.assert_series_equal(result, expected)
result[:] = 1
assert (ts[1:3] == 1).all()
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq="A-JUN")
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts["2007"]
expected = ts[idx == "2007"]
tm.assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN")
expected = PeriodIndex([2000, 2007, 2009], freq="A-JUN")
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
def test_shift(self):
# This is tested in test_arithmetic
pass
@td.skip_if_32bit
def test_ndarray_compat_properties(self):
super().test_ndarray_compat_properties()
def test_negative_ordinals(self):
Period(ordinal=-1000, freq="A")
Period(ordinal=0, freq="A")
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq="A")
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq="A")
tm.assert_index_equal(idx1, idx2)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2012-03", "2012-04"], freq="D", name="name"
)
exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name="name")
tm.assert_index_equal(idx.year, exp)
exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name="name")
tm.assert_index_equal(idx.month, exp)
def test_pindex_qaccess(self):
pi = PeriodIndex(["2Q05", "3Q05", "4Q05", "1Q06", "2Q06"], freq="Q")
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
# Todo: fix these accessors!
assert s["05Q4"] == s[2]
def test_pindex_multiples(self):
expected = PeriodIndex(
["2011-01", "2011-03", "2011-05", "2011-07", "2011-09", "2011-11"],
freq="2M",
)
pi = period_range(start="1/1/11", end="12/31/11", freq="2M")
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == "2M"
pi = period_range(start="1/1/11", periods=6, freq="2M")
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == "2M"
def test_iteration(self):
index = period_range(start="1/1/10", periods=4, freq="B")
result = list(index)
assert isinstance(result[0], Period)
assert result[0].freq == index.freq
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq="A")
assert not index.is_full
index = PeriodIndex([2005, 2006, 2007], freq="A")
assert index.is_full
index = PeriodIndex([2005, 2005, 2007], freq="A")
assert not index.is_full
index = PeriodIndex([2005, 2005, 2006], freq="A")
assert index.is_full
index = PeriodIndex([2006, 2005, 2005], freq="A")
with pytest.raises(ValueError, match="Index is not monotonic"):
index.is_full
assert index[:0].is_full
def test_with_multi_index(self):
# #1705
index = date_range("1/1/2012", periods=4, freq="12H")
index_as_arrays = [index.to_period(freq="D"), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
assert isinstance(s.index.levels[0], PeriodIndex)
assert isinstance(s.index.values[0][0], Period)
def test_convert_array_of_periods(self):
rng = period_range("1/1/2000", periods=20, freq="D")
periods = list(rng)
result = Index(periods)
assert isinstance(result, PeriodIndex)
def test_append_concat(self):
# #1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_pickle_freq(self):
# GH2891
prng = period_range("1/1/2011", "1/1/2012", freq="M")
new_prng = tm.round_trip_pickle(prng)
assert new_prng.freq == offsets.MonthEnd()
assert new_prng.freqstr == "M"
def test_map(self):
# test_map_dictlike generally tests
index = PeriodIndex([2005, 2007, 2009], freq="A")
result = index.map(lambda x: x.ordinal)
exp = Index([x.ordinal for x in index])
tm.assert_index_equal(result, exp)
def test_insert(self):
# GH 18295 (test missing)
expected = PeriodIndex(["2017Q1", NaT, "2017Q2", "2017Q3", "2017Q4"], freq="Q")
for na in (np.nan, NaT, None):
result = period_range("2017Q1", periods=4, freq="Q").insert(1, na)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"msg, key",
[
(r"Period\('2019', 'A-DEC'\), 'foo', 'bar'", (Period(2019), "foo", "bar")),
(r"Period\('2019', 'A-DEC'\), 'y1', 'bar'", (Period(2019), "y1", "bar")),
(r"Period\('2019', 'A-DEC'\), 'foo', 'z1'", (Period(2019), "foo", "z1")),
(
r"Period\('2018', 'A-DEC'\), Period\('2016', 'A-DEC'\), 'bar'",
(Period(2018), Period(2016), "bar"),
),
(r"Period\('2018', 'A-DEC'\), 'foo', 'y1'", (Period(2018), "foo", "y1")),
(
r"Period\('2017', 'A-DEC'\), 'foo', Period\('2015', 'A-DEC'\)",
(Period(2017), "foo", Period(2015)),
),
(r"Period\('2017', 'A-DEC'\), 'z1', 'bar'", (Period(2017), "z1", "bar")),
],
)
def test_contains_raise_error_if_period_index_is_in_multi_index(self, msg, key):
# issue 20684
"""
parse_time_string return parameter if type not matched.
PeriodIndex.get_loc takes returned value from parse_time_string as a tuple.
If first argument is Period and a tuple has 3 items,
process go on not raise exception
"""
df = DataFrame(
{
"A": [Period(2019), "x1", "x2"],
"B": [Period(2018), Period(2016), "y1"],
"C": [Period(2017), "z1", Period(2015)],
"V1": [1, 2, 3],
"V2": [10, 20, 30],
}
).set_index(["A", "B", "C"])
with pytest.raises(KeyError, match=msg):
df.loc[key]
def test_format_empty(self):
# GH35712
empty_idx = self._holder([], freq="A")
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
def test_maybe_convert_timedelta():
pi = PeriodIndex(["2000", "2001"], freq="D")
offset = offsets.Day(2)
assert pi._maybe_convert_timedelta(offset) == 2
assert pi._maybe_convert_timedelta(2) == 2
offset = offsets.BusinessDay()
msg = r"Input has different freq=B from PeriodIndex\(freq=D\)"
with pytest.raises(ValueError, match=msg):
pi._maybe_convert_timedelta(offset)
def test_is_monotonic_with_nat():
# GH#31437
# PeriodIndex.is_monotonic should behave analogously to DatetimeIndex,
# in particular never be monotonic when we have NaT
dti = date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
tdi = Index(dti.view("timedelta64[ns]"))
for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]:
if isinstance(obj, Index):
# i.e. not Engines
assert obj.is_monotonic
assert obj.is_monotonic_increasing
assert not obj.is_monotonic_decreasing
assert obj.is_unique
dti1 = dti.insert(0, NaT)
pi1 = dti1.to_period("D")
tdi1 = Index(dti1.view("timedelta64[ns]"))
for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]:
if isinstance(obj, Index):
# i.e. not Engines
assert not obj.is_monotonic
assert not obj.is_monotonic_increasing
assert not obj.is_monotonic_decreasing
assert obj.is_unique
dti2 = dti.insert(3, NaT)
pi2 = dti2.to_period("H")
tdi2 = Index(dti2.view("timedelta64[ns]"))
for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]:
if isinstance(obj, Index):
# i.e. not Engines
assert not obj.is_monotonic
assert not obj.is_monotonic_increasing
assert not obj.is_monotonic_decreasing
assert obj.is_unique
@pytest.mark.parametrize("array", [True, False])
def test_dunder_array(array):
obj = PeriodIndex(["2000-01-01", "2001-01-01"], freq="D")
if array:
obj = obj._data
expected = np.array([obj[0], obj[1]], dtype=object)
result = np.array(obj)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(obj)
tm.assert_numpy_array_equal(result, expected)
expected = obj.asi8
for dtype in ["i8", "int64", np.int64]:
result = np.array(obj, dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(obj, dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
for dtype in ["float64", "int32", "uint64"]:
msg = "argument must be"
with pytest.raises(TypeError, match=msg):
np.array(obj, dtype=dtype)
with pytest.raises(TypeError, match=msg):
np.array(obj, dtype=getattr(np, dtype))
|
|
#!/bin/sh
"exec" "`dirname $0`/opt/miniconda2/bin/python" "$0" "$@"
import argparse
import os
import sys
import subprocess
import logging
import shutil
import time
import signal
import itertools
import glob
import tempfile
import re
import pysam
from liftover_restricted_vcf_map import lift_vcfs, lift_maps
from generate_small_test_ref import gen_restricted_ref_and_vcfs
from utils import makedirs, run_shell_command, versatile_open, get_loglevel, check_java, MY_DIR, VARSIMJAR, get_version
import utils
REQUIRE_VARSIMJAR = not os.path.isfile(VARSIMJAR)
if REQUIRE_VARSIMJAR: VARSIMJAR = None
def convertCN(filenames, operation):
"""
Convert '2/1'-like copy-number (CN) values to a single number (e.g. 2) when
operation is "two2one" (0 is treated the same as 1 and the larger value is
kept), or expand a single CN back to a per-allele form along GT when operation
is "one2two". The files are modified in place.
"""
logger = logging.getLogger(convertCN.__name__)
logger.info("convertCN started")
if operation != "two2one" and operation != "one2two":
raise ValueError("Only two2one or one2two allowed")
two2one = operation == "two2one"
delimiter = re.compile('[/|]')
for name in filenames:
logger.info("processing {}".format(name))
with versatile_open(name, 'r') as file_fd:
output = tempfile.NamedTemporaryFile(mode = 'r+w', delete = False)
for l in file_fd:
l = l.rstrip()
fields = l.split("\t")
if l.startswith("#") or 'CN' not in fields[8]:
if l.startswith('##FORMAT=<ID=CN'):
if two2one:
l = l.replace("Type=String","Type=Integer")
else:
l = l.replace("Type=Integer", "Type=String")
output.write(l + "\n")
else:
info = fields[8].split(':')
cnIndex = info.index('CN')
gtIndex = info.index('GT')
#change CN field in all samples
for sampleIndex in range(9,len(fields)):
sampleInfo = fields[sampleIndex].split(':')
if two2one:
cn = delimiter.split(sampleInfo[cnIndex])
#here cn is list of strings
sampleInfo[cnIndex] = str(max(map(int, cn)))
elif len(delimiter.split(sampleInfo[cnIndex])) == 1:
#only split when there is only one number
gt = delimiter.split(sampleInfo[gtIndex])
cn = sampleInfo[cnIndex]
for i in range(len(gt)):
gt[i] = '1' if gt[i] == '0' else cn
if sampleInfo[gtIndex].find('/') >= 0:
sampleInfo[cnIndex] = '/'.join(gt)
else:
sampleInfo[cnIndex] = '|'.join(gt)
fields[sampleIndex] = ":".join(sampleInfo)
output.write("\t".join(fields) + "\n")
output.close()
shutil.copyfile(output.name, name)
os.remove(output.name)
logger.info("convertCN done")
return
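# Worked sketch (hedged, illustrative sample columns only): with operation
# "two2one", a sample entry with FORMAT GT:CN of '0/1:2/1' becomes '0/1:2'
# (the max of 2 and 1); with "one2two" the single CN is expanded back along GT,
# so '0/1:2' -> '0/1:1/2'.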
def get_contigs_list(reference):
with open("%s.fai" % (reference)) as fai_file:
contigs = [line.strip().split()[0] for line in fai_file.readlines()]
return contigs
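# The .fai index read by get_contigs_list is the plain-text output of
# `samtools faidx`; only the first (contig name) column is used here.
# Hedged sketch of two such lines (NAME, LENGTH, OFFSET, LINEBASES, LINEWIDTH):
#   chr1<TAB>248956422<TAB>112<TAB>70<TAB>71
#   chr2<TAB>242193529<TAB>252513167<TAB>70<TAB>71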
def monitor_processes(processes):
logger = logging.getLogger(monitor_processes.__name__)
while processes:
time.sleep(1)
kill_all = False
processes_running = []
for p in processes:
status = p.poll()
if status is not None:
logger.info("Process %s exited with code %d" % (p.pid, status))
if status != 0:
kill_all = True
logger.error("Process %s failed. Will kill the remaining processes." % p.pid)
else:
processes_running.append(p)
if kill_all:
for p in processes:
status = p.poll()
if status is None:
try:
os.killpg(p.pid, signal.SIGTERM)
except OSError, e:
try:
os.killpg(p.pid, signal.SIGKILL)
except OSError, ex:
logger.error("Could not kill the process " + str(p.pid))
raise Exception('Aborting... Please check log for details.')
processes = processes_running
return []
def concatenate_files(files, merged, header_str="", simple_cat=True, remove_original=False):
logger = logging.getLogger(concatenate_files.__name__)
logger.info("Concatenating " + " ".join(files) + " as " + merged)
with open(merged, "w") as merged_fd:
for index, f in enumerate(files):
with open(f) as fd:
if simple_cat:
shutil.copyfileobj(fd, merged_fd)
else:
for line in fd:
if line.strip() and (not index or not header_str or not line.startswith(header_str)):
merged_fd.write(line)
if remove_original:
logger.info("Removing " + f)
os.remove(f)
def check_executable(fpath):
logger = logging.getLogger(check_executable.__name__)
if not os.path.isfile(fpath):
logger.error("ERROR: File %s does not exist\n" % (fpath))
sys.exit(os.EX_NOINPUT)
if not os.access(fpath, os.X_OK):
logger.error("ERROR: File %s is not executable\n" % (fpath))
sys.exit(os.EX_NOINPUT)
def fill_missing_sequences(vcf, id, seq_file, reference, work_dir, log_dir, java = "java"):
logger = logging.getLogger(fill_missing_sequences.__name__)
out_vcf = os.path.join(work_dir, os.path.basename(vcf))
if out_vcf.endswith(".gz"):
out_vcf = out_vcf[:-3]
out_log = os.path.join(log_dir, "%s_fill_missing.log" % (os.path.basename(vcf)))
command = [java, utils.JAVA_XMX, "-jar", VARSIMJAR, "randsequencevcf", "-id", id, "-in_vcf", vcf, "-seq", seq_file, "-out_vcf", out_vcf, "-ref", reference]
with open(out_log, "w") as log_fd:
logger.info("Running command " + " ".join(command))
run_shell_command(" ".join(command), cmd_stdout=None, cmd_stderr=log_fd)
return out_vcf
def run_vcfstats(vcfs, out_dir, log_dir, java = "java"):
logger = logging.getLogger(run_vcfstats.__name__)
processes = []
for in_vcf in vcfs:
out_prefix = os.path.basename(in_vcf)
vcfstats_stdout = open(os.path.join(out_dir, "%s.stats" % (out_prefix)), "w")
vcfstats_stderr = open(os.path.join(log_dir, "%s.vcfstats.err" % (out_prefix)), "w")
vcfstats_command = [java, utils.JAVA_XMX, "-jar", VARSIMJAR, "vcfstats", "-vcf",
in_vcf]
logger.info("Executing command " + " ".join(vcfstats_command))
run_shell_command(vcfstats_command, cmd_stdout=vcfstats_stdout, cmd_stderr=vcfstats_stderr)
return processes
class RandVCFOptions:
def __init__(self, num_snp, num_ins, num_del, num_mnp, num_complex, percent_novel, min_length, max_length, prop_het, num_dup = 0, num_inv = 0):
self.num_snp = num_snp
self.num_ins = num_ins
self.num_del = num_del
self.num_mnp = num_mnp
self.num_dup = num_dup
self.num_inv = num_inv
self.num_complex = num_complex
self.percent_novel = percent_novel
self.min_length = min_length
self.max_length = max_length
self.prop_het = prop_het
class RandDGVOptions:
def __init__(self, num_ins, num_del, num_dup, num_inv, percent_novel, min_length, max_length, prop_het, output_all = " "):
self.num_ins = num_ins
self.num_del = num_del
self.num_dup = num_dup
self.num_inv = num_inv
self.percent_novel = percent_novel
self.min_length = min_length
self.max_length = max_length
self.prop_het = prop_het
self.output_all = output_all
def randdgv_options2randvcf_options(randdgv_options):
'''
automatically set up shared fields between RandVCFOptions and RandDGVOptions
:param randdgv_options:
:return: RandVCFOptions instance
'''
return RandVCFOptions(
num_snp= 0,
num_ins = randdgv_options.num_ins,
num_del = randdgv_options.num_del,
num_mnp = 0,
num_complex = 0,
percent_novel= randdgv_options.percent_novel,
min_length = randdgv_options.min_length,
max_length = randdgv_options.max_length,
prop_het=randdgv_options.prop_het,
num_dup = randdgv_options.num_dup,
num_inv = randdgv_options.num_inv
)
def run_randvcf(sampling_vcf, out_vcf_fd, log_file_fd, seed, sex, randvcf_options, reference, sample_id, java = "java"):
logger = logging.getLogger(run_randvcf.__name__)
rand_vcf_command = [java, utils.JAVA_XMX, "-jar", VARSIMJAR, "randvcf2vcf",
"-seed", str(seed),
"-t", sex,
"-num_snp", str(randvcf_options.num_snp),
"-num_ins", str(randvcf_options.num_ins),
"-num_del", str(randvcf_options.num_del),
"-num_mnp", str(randvcf_options.num_mnp),
"-num_complex", str(randvcf_options.num_complex),
"-num_dup", str(randvcf_options.num_dup),
"-num_inv", str(randvcf_options.num_inv),
"-novel", str(randvcf_options.percent_novel),
"-min_len", str(randvcf_options.min_length),
"-max_len", str(randvcf_options.max_length),
"-prop_het", str(randvcf_options.prop_het),
"-ref", os.path.realpath(reference),
"-id", "'" + str(sample_id) + "'",
"-vcf", sampling_vcf]
logger.info("Executing command " + " ".join(rand_vcf_command))
run_shell_command(rand_vcf_command, cmd_stdout=out_vcf_fd, cmd_stderr=log_file_fd)
return
def run_randdgv(dgv_file, out_vcf_fd, log_file_fd, seed, sex, options, reference, insert_seq_file, sample_id, java = "java"):
logger = logging.getLogger(run_randdgv.__name__)
rand_dgv_command = [java, utils.JAVA_XMX, "-jar", VARSIMJAR, "randdgv2vcf",
"-t", sex,
"-seed", str(seed),
"-num_ins", str(options.num_ins),
"-num_del", str(options.num_del),
"-num_dup", str(options.num_dup),
"-num_inv", str(options.num_inv),
"-novel", str(options.percent_novel),
"-min_len", str(options.min_length),
"-max_len", str(options.max_length),
"-prop_het", str(options.prop_het),
"-ref", os.path.realpath(reference),
"-ins", os.path.realpath(insert_seq_file),
"-id", "'" + str(sample_id) + "'", # allow empty string ID
"-dgv", os.path.realpath(dgv_file)]
if len(options.output_all.strip()) > 0:
rand_dgv_command.append(options.output_all)
logger.info("Executing command " + " ".join(rand_dgv_command))
run_shell_command(rand_dgv_command, cmd_stdout=out_vcf_fd, cmd_stderr=log_file_fd)
return
def varsim_main(reference,
simulator, # use None to disable simulation
simulator_exe,
total_coverage,
variant_vcfs=[],
sampling_vcf=None,
dgv_file=None,
randvcf_options=None, # use None to disable RandVCF
randdgv_options=None, # use None to disable RandDGV
nlanes=1,
simulator_options="",
sample_id="VarSim_Sample",
log_dir="log",
out_dir="out",
sv_insert_seq=None,
seed=0,
sex="MALE",
remove_filtered=False,
keep_temp=False,
force_five_base_encoding=False,
lift_ref=False,
disable_vcf2diploid=False,
java="java"):
check_java(java)
# make the directories we need
makedirs([log_dir, out_dir])
logger = logging.getLogger(varsim_main.__name__)
# Make sure we can actually execute the executable
if simulator:
if simulator not in ["dwgsim", "art", "longislnd"]:
raise NotImplementedError("Simulation method {} not implemented".format(simulator))
check_executable(simulator_exe)
processes = []
t_s = time.time()
variant_vcfs = map(os.path.realpath, variant_vcfs)
if sv_insert_seq:
in_vcfs = []
for i, vcf in enumerate(variant_vcfs):
tool_work_dir = os.path.join(out_dir, "filled_in", str(i))
makedirs([tool_work_dir])
in_vcfs.append(fill_missing_sequences(vcf, sample_id, os.path.realpath(sv_insert_seq), reference, tool_work_dir, tool_work_dir, java))
variant_vcfs = map(os.path.realpath, in_vcfs)
else:
logger.warn("Not filling in SV sequences since no insert sequence file provided")
open_fds = []
if randvcf_options:
if not sampling_vcf:
logger.error("Need to provide the VCF for random sampling")
raise ValueError("Sampling VCF missing")
rand_vcf_out_fd = open(os.path.join(out_dir, "random.vc.vcf"), "w")
rand_vcf_log_fd = open(os.path.join(log_dir, "RandVCF2VCF.err"), "w")
variant_vcfs.append(os.path.realpath(rand_vcf_out_fd.name))
run_randvcf(os.path.realpath(sampling_vcf), rand_vcf_out_fd, rand_vcf_log_fd, seed, sex, randvcf_options, reference, sample_id, java)
open_fds += [rand_vcf_out_fd, rand_vcf_log_fd]
if randdgv_options:
if not sv_insert_seq:
raise ValueError("Need SV sequence file to fill in SV sequences")
if not dgv_file:
logger.error("Need to provide the DGV file for random sampling")
raise ValueError("DGV file missing")
rand_dgv_stdout = open(os.path.join(out_dir, "random.sv.vcf"), "w")
rand_dgv_stderr = open(os.path.join(log_dir, "RandDGV2VCF.err"), "w")
variant_vcfs.append(os.path.realpath(rand_dgv_stdout.name))
run_randdgv(dgv_file, rand_dgv_stdout, rand_dgv_stderr, seed, sex, randdgv_options, reference, sv_insert_seq, sample_id, java)
open_fds += [rand_dgv_stdout, rand_dgv_stderr]
processes = monitor_processes(processes)
for open_fd in open_fds:
open_fd.close()
merged_reference = os.path.join(out_dir, "%s.fa" % (sample_id))
merged_truth_vcf = os.path.join(out_dir, "%s.truth.vcf" % (sample_id))
merged_map = os.path.join(out_dir, "%s.map" % (sample_id))
processes = run_vcfstats(variant_vcfs, out_dir, log_dir, java)
if not disable_vcf2diploid:
logger.info("vcf2diploid started")
vcf2diploid_stdout = open(os.path.join(out_dir, "vcf2diploid.out"), "w")
vcf2diploid_stderr = open(os.path.join(log_dir, "vcf2diploid.err"), "w")
vcf_arg_list = sum([["-vcf", v] for v in variant_vcfs], [])
filter_arg_list = ["-pass"] if remove_filtered else []
vcf2diploid_command = [java, utils.JAVA_XMX, "-jar", VARSIMJAR, "vcf2diploid",
"-t", sex,
"-id", sample_id,
"-chr", os.path.realpath(reference)] + filter_arg_list + vcf_arg_list + ["-no_contig_id"]
logger.info("Executing command " + " ".join(vcf2diploid_command))
run_shell_command(vcf2diploid_command, cmd_stdout=vcf2diploid_stdout, cmd_stderr=vcf2diploid_stderr,
cmd_dir=out_dir)
processes = monitor_processes(processes)
# Now concatenate the .fa from vcf2diploid
contigs = get_contigs_list(reference)
contig_fastas = map(lambda (x, y): os.path.join(out_dir, "%s_%s_%s.fa" % (x, sample_id, y)), itertools.product(contigs, ["maternal", "paternal"]))
fastas_to_cat = filter(os.path.isfile, contig_fastas)
concatenate_files(fastas_to_cat, merged_reference, remove_original=True)
if os.path.getsize(merged_reference) == 0:
logger.error("Merged FASTA is empty. Something bad happened. Exiting")
raise RuntimeError("Empty FASTA generated by vcf2diploid")
# concatenate the VCFs
vcfs_to_cat = filter(os.path.isfile, map(lambda x: os.path.join(out_dir, "%s_%s.vcf" % (x, sample_id)), contigs))
concatenate_files(vcfs_to_cat, merged_truth_vcf, header_str="#", simple_cat=False, remove_original=True)
run_vcfstats([merged_truth_vcf], out_dir, log_dir, java)
logger.info("vcf2diploid done")
if lift_ref:
lifted_dir = os.path.join(out_dir, "lifted")
makedirs([lifted_dir])
#quick fix for issue of CN
convertCN([merged_truth_vcf], "two2one")
merged_truth_vcf = lift_vcfs([merged_truth_vcf], os.path.join(lifted_dir, "truth.vcf"), None, tabix_index=False)
#quick fix for issue of CN
convertCN([merged_truth_vcf], "one2two")
pysam.tabix_index(merged_truth_vcf, force=True, preset='vcf')
merged_map = lift_maps([merged_map], os.path.join(lifted_dir, "truth.map"))
if processes:
processes = monitor_processes(processes)
# Now generate the reads using art/pbsim/dwgsim
tmp_files = []
if simulator:
fifos = []
fastqs = []
sim_ts = time.time()
coverage_per_lane = total_coverage * 0.5 / nlanes
processes = []
fifo_src_dst = []
if simulator == "dwgsim":
for i, end in itertools.product(xrange(nlanes), [1, 2]):
fifo_src_dst.append(
("simulated.lane%d.read%d.fastq" % (i, end),
"simulated.lane%d.read%d.fq.gz" % (i, end)))
elif simulator == "art":
for i, end, suffix in itertools.product(xrange(nlanes), [1, 2], ["fq", "aln"]):
fifo_src_dst.append(("simulated.lane%d.read%d.%s" % (i, end, suffix),
"simulated.lane%d.read%d.%s.gz" % (i, end, suffix)))
else: # simulator == "longislnd":
pass
for fifo_name, dst in fifo_src_dst:
fifos.append(os.path.join(out_dir, fifo_name))
if os.path.exists(fifos[-1]): os.remove(fifos[-1])
os.mkfifo(fifos[-1])
gzip_stderr = open(os.path.join(log_dir, "gzip.%s" % (fifo_name)), "w")
gzip_command = "cat %s | gzip -2 > %s" % (fifos[-1], os.path.join(out_dir, dst))
logger.info("Executing command %s" % (gzip_command) )
gzip_p = subprocess.Popen(gzip_command, stdout = None, stderr = gzip_stderr, shell = True)
logger.info( " with pid " + str(gzip_p.pid))
processes.append(gzip_p)
tmp_files.append(os.path.join(out_dir, dst))
simulator_commands_files = []
if simulator == "dwgsim":
for i in xrange(nlanes):
simulator_command = "{} {} -C {} -z {} {} {}".format(os.path.realpath(simulator_exe), simulator_options, coverage_per_lane, seed + i, merged_reference, os.path.join(out_dir, "simulated.lane%d" % (i)))
simulator_commands_files.append((simulator_command, os.path.join(log_dir, "dwgsim.lane%d.out" % (i)), os.path.join(log_dir, "dwgsim.lane%d.err" % (i))))
elif simulator == "art":
for i in xrange(nlanes):
simulator_command = "{} {} -i {} -f {} -rs {} -o {}".format(simulator_exe, simulator_options, merged_reference, coverage_per_lane, seed + i, os.path.join(out_dir, "simulated.lane%d.read" % (i)))
simulator_commands_files.append((simulator_command, os.path.join(log_dir, "art.lane%d.out" % (i)), os.path.join(log_dir, "art.lane%d.err" % (i))))
else: # simulator == "longislnd":
simulator_command = "{} {} --coverage {} --out {} --fasta {}".format(simulator_exe, simulator_options, total_coverage * 0.5, os.path.join(out_dir, "longislnd_sim"), merged_reference)
simulator_commands_files.append((simulator_command, os.path.join(log_dir, "longislnd.out"), os.path.join(log_dir, "longislnd.err")))
simulator_fds = []
for command, stdout, stderr in simulator_commands_files:
stdout_fd = open(stdout, "w")
stderr_fd = open(stderr, "w")
process = subprocess.Popen(command, stdout=stdout_fd, stderr=stderr_fd, shell=True, close_fds=True)
logger.info("Executing command {} with pid {}".format(command, process.pid))
processes.append(process)
simulator_fds += [stdout_fd, stderr_fd]
monitor_processes(processes)
for fd in simulator_fds:
fd.close()
processes = []
logger.info("Read generation took %g seconds" % (time.time() - sim_ts))
sim_t_liftover = time.time()
# Now start lifting over the gzipped files
if simulator != "longislnd":
for i in xrange(nlanes):
liftover_stdout = open(os.path.join(log_dir, "lane%d.out" % (i)), "w")
liftover_stderr = open(os.path.join(log_dir, "liftover%d.log" % (i)), "w")
fastq_liftover_command = "%s -server %s -jar %s fastq_liftover -map %s -id %d " \
"-fastq <(gunzip -c %s/simulated.lane%d.read1.fq.gz) " \
"-fastq <(gunzip -c %s/simulated.lane%d.read2.fq.gz) " \
"-out >(gzip -1 > %s/lane%d.read1.fq.gz) " \
"-out >(gzip -1 > %s/lane%d.read2.fq.gz)" % (
java,
utils.JAVA_XMX,
VARSIMJAR, merged_map, i, out_dir, i,
out_dir, i, out_dir, i,
out_dir, i)
if force_five_base_encoding:
fastq_liftover_command += " -force_five_base_encoding "
if simulator == "art":
fastq_liftover_command += " -type art " \
"-aln <(gunzip -c %s/simulated.lane%d.read1.aln.gz) " \
"-aln <(gunzip -c %s/simulated.lane%d.read2.aln.gz)" % (
out_dir, i, out_dir, i)
elif simulator == "pbsim":
fastq_liftover_command += " -type pbsim " \
"-maf <(gunzip -c %s/simulated.lane%d.read1.maf.gz) " \
"-ref %s/simulated.lane%d.ref " % (out_dir, i, out_dir, i)
fastq_liftover_command = "bash -c \"%s\"" % (fastq_liftover_command)
logger.info("Executing command " + fastq_liftover_command)
run_shell_command(fastq_liftover_command, cmd_stdout = liftover_stdout, cmd_stderr = liftover_stderr)
fastqs.append(os.path.join(out_dir, "lane%d.read1.fq.gz" % i))
fastqs.append(os.path.join(out_dir, "lane%d.read2.fq.gz" % i))
else:
# liftover the read map files
read_map_files = list(glob.glob(os.path.join(out_dir, "longislnd_sim", "*.bed")))
merged_raw_readmap = os.path.join(out_dir, "longislnd_sim", "merged_readmap.bed")
concatenate_files(read_map_files, merged_raw_readmap)
read_maps = "-longislnd %s" % merged_raw_readmap
read_map_liftover_command = "%s %s -server -jar %s longislnd_liftover " % (java, utils.JAVA_XMX, VARSIMJAR) + read_maps + " -map %s " % merged_map + " -out %s" % (os.path.join(out_dir, sample_id + ".truth.map"))
read_map_liftover_stderr = open(os.path.join(log_dir, "longislnd_liftover.err"), "w")
logger.info("Executing command " + read_map_liftover_command )
run_shell_command(read_map_liftover_command, cmd_stdout = None, cmd_stderr = read_map_liftover_stderr)
monitor_processes(processes)
logger.info("Liftover took %g seconds" % (time.time() - sim_t_liftover))
sim_te = max(sim_ts + 1, time.time())
bytes_written = sum([os.path.getsize(fastq) for fastq in fastqs])
logger.info("Took %g seconds, %ld Mbytes written, %g MB/s" % (
sim_te - sim_ts, bytes_written / 1024.0 / 1024.0, bytes_written / 1024.0 / 1024.0 / (sim_te - sim_ts)))
for fifo in fifos:
os.remove(fifo)
if not keep_temp:
logger.info("Cleaning up intermediate files")
for f in tmp_files:
os.remove(f)
logger.info("Done! (%g hours)" % ((time.time() - t_s) / 3600.0))
if __name__ == "__main__":
main_parser = argparse.ArgumentParser(description="VarSim: A high-fidelity simulation validation framework",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
main_parser.add_argument("--out_dir", metavar="DIR",
help="Output directory for the simulated genome, reads and variants", required=False,
default="out")
main_parser.add_argument("--work_dir", metavar="DIR", help="Work directory, currently not used", required=False,
default="work")
main_parser.add_argument("--log_dir", metavar="DIR", help="Log files of all steps are kept here", required=False,
default="log")
main_parser.add_argument("--reference", metavar="FASTA", help="Reference genome that variants will be inserted into",
required=True)
main_parser.add_argument("--seed", metavar="seed", help="Random number seed for reproducibility", type=int, default=0)
main_parser.add_argument("--sex", metavar="Sex", help="Sex of the person (MALE/FEMALE)", required=False, type=str,
choices=["MALE", "FEMALE"], default="MALE")
main_parser.add_argument("--id", metavar="ID", help="Sample ID to be put in output VCF file", required=True)
main_parser.add_argument("--simulator", metavar="SIMULATOR", help="Read simulator to use", required=False, type=str,
choices=["art", "dwgsim", "longislnd"], default="art")
main_parser.add_argument("--simulator_executable", metavar="PATH",
help="Path to the executable of the read simulator chosen"
, required=True)
main_parser.add_argument("--varsim_jar", metavar="PATH", help="Path to VarSim.jar (deprecated)",
default=VARSIMJAR,
required=False)
main_parser.add_argument("--java", metavar="PATH", help="Path to java",
default="java", required=False)
main_parser.add_argument("--read_length", metavar="LENGTH", help="Length of read to simulate", default=100, type=int)
main_parser.add_argument("--nlanes", metavar="INTEGER",
help="Number of lanes to generate, coverage will be divided evenly over the lanes. Simulation is parallized over lanes. Each lane will have its own pair of files",
default=1, type=int)
main_parser.add_argument("--total_coverage", metavar="FLOAT", help="Total coverage to simulate", default=1.0,
type=float)
main_parser.add_argument("--mean_fragment_size", metavar="INT", help="Mean fragment size to simulate", default=350,
type=int)
main_parser.add_argument("--sd_fragment_size", metavar="INT", help="Standard deviation of fragment size to simulate",
default=50, type=int)
main_parser.add_argument("--vcfs", metavar="VCF",
help="Addtional list of VCFs to insert into genome, priority is lowest ... highest", nargs="+",
default=[])
main_parser.add_argument("--force_five_base_encoding", action="store_true", help="Force output bases to be only ACTGN")
main_parser.add_argument("--filter", action="store_true", help="Only use PASS variants for simulation")
main_parser.add_argument("--keep_temp", action="store_true", help="Keep temporary files after simulation")
main_parser.add_argument("--lift_ref", action="store_true", help="Liftover chromosome names from restricted reference")
main_parser.add_argument("--java_max_mem", metavar="XMX", help="max java memory", default="10g", type = str)
main_parser.add_argument('--version', action='version', version=get_version())
main_parser.add_argument('--log_to_stderr', action='store_true', help='Output log to stderr instead of log_dir/varsim.log')
main_parser.add_argument("--loglevel", help="Set logging level", choices=["debug", "warn", "info"], default="info")
pipeline_control_group = main_parser.add_argument_group("Pipeline control options. Disable parts of the pipeline.")
pipeline_control_group.add_argument("--disable_rand_vcf", action="store_true",
help="Disable sampling from the provided small variant VCF")
pipeline_control_group.add_argument("--disable_rand_dgv", action="store_true",
help="Disable sampline from the provided DGV file")
pipeline_control_group.add_argument("--disable_vcf2diploid", action="store_true",
help="Disable diploid genome simulation")
pipeline_control_group.add_argument("--disable_sim", action="store_true", help="Disable read simulation")
# RandVCF2VCF seed num_SNP num_INS num_DEL num_MNP num_COMPLEX percent_novel min_length_lim max_length_lim reference_file file.vcf
rand_vcf_group = main_parser.add_argument_group("Small variant simulation options")
rand_vcf_group.add_argument("--vc_num_snp", metavar="INTEGER", help="Number of SNPs to sample from small variant VCF",
default=0, type=int)
rand_vcf_group.add_argument("--vc_num_ins", metavar="INTEGER",
help="Number of insertions to sample from small variant VCF", default=0, type=int)
rand_vcf_group.add_argument("--vc_num_del", metavar="INTEGER",
help="Number of deletions to sample from small variant VCF", default=0, type=int)
rand_vcf_group.add_argument("--vc_num_mnp", metavar="INTEGER", help="Number of MNPs to sample from small variant VCF",
default=0, type=int)
rand_vcf_group.add_argument("--vc_num_complex", metavar="INTEGER",
help="Number of complex variants to sample from small variant VCF", default=0,
type=int)
rand_vcf_group.add_argument("--vc_percent_novel", metavar="FLOAT",
help="Percent variants sampled from small variant VCF that will be moved to novel positions",
default=0, type=float)
rand_vcf_group.add_argument("--vc_min_length_lim", metavar="INTEGER",
help="Min length of small variant to accept [inclusive]", default=0, type=int)
rand_vcf_group.add_argument("--vc_max_length_lim", metavar="INTEGER",
help="Max length of small variant to accept [inclusive]", default=99,
type=int)
rand_vcf_group.add_argument("--vc_in_vcf", metavar="VCF", help="Input small variant VCF, usually dbSNP",
required=False)
rand_vcf_group.add_argument("--vc_prop_het", metavar="FLOAT", help="Proportion of heterozygous small variants",
default=0.6,
type=float)
# RandDGV2VCF seed num_INS num_DEL num_DUP num_INV percent_novel min_length_lim max_length_lim reference_file insert_seq.txt dgv_file.txt
rand_dgv_group = main_parser.add_argument_group("Structural variant simulation options")
rand_dgv_group.add_argument("--sv_num_ins", metavar="INTEGER", help="Number of insertions to sample from DGV",
default=20, type=int)
rand_dgv_group.add_argument("--sv_num_del", metavar="INTEGER", help="Number of deletions to sample from DGV",
default=20, type=int)
rand_dgv_group.add_argument("--sv_num_dup", metavar="INTEGER", help="Number of duplications to sample from DGV",
default=20, type=int)
rand_dgv_group.add_argument("--sv_num_inv", metavar="INTEGER", help="Number of inversions to sample from DGV",
default=20, type=int)
rand_dgv_group.add_argument("--sv_percent_novel", metavar="FLOAT",
help="Percent variants sampled from DGV that will be moved to novel positions", default=0,
type=float)
rand_dgv_group.add_argument("--sv_min_length_lim", metavar="min_length_lim",
help="Min length of structural variant to accept [inclusive]", default=100,
type=int)
rand_dgv_group.add_argument("--sv_max_length_lim", metavar="max_length_lim",
help="Max length of structural variant to accept [inclusive]", default=1000000,
type=int)
rand_dgv_group.add_argument("--sv_insert_seq", metavar="FILE",
help="Path to file containing concatenation of real insertion sequences",
required=False)
rand_dgv_group.add_argument("--sv_dgv", metavar="DGV_FILE", help="DGV file containing structural variants",
required=False)
rand_dgv_group.add_argument("--sv_prop_het", metavar="FLOAT", help="Proportion of heterozygous structural variants",
default=0.6,
type=float)
dwgsim_group = main_parser.add_argument_group("DWGSIM options")
dwgsim_group.add_argument("--dwgsim_start_e", metavar="first_base_error_rate", help="Error rate on the first base",
default=0.0001, type=float)
dwgsim_group.add_argument("--dwgsim_end_e", metavar="last_base_error_rate", help="Error rate on the last base",
default=0.0015, type=float)
dwgsim_group.add_argument("--dwgsim_options", help="DWGSIM command-line options", default="", required=False)
art_group = main_parser.add_argument_group("ART options")
art_group.add_argument("--profile_1", metavar="profile_file1", help="ART error profile for first end", default="")
art_group.add_argument("--profile_2", metavar="profile_file2", help="ART error profile for second end", default="")
art_group.add_argument("--art_options", help="ART command-line options", default="")
pbsim_group = main_parser.add_argument_group("PBSIM options")
pbsim_group.add_argument("--model_qc", metavar="model_qc", help="PBSIM QC model", default=None, type=str)
longislnd_group = main_parser.add_argument_group("LongISLND options")
longislnd_group.add_argument("--longislnd_options", help="LongISLND options", default="")
args = main_parser.parse_args()
args.java = utils.get_java(args.java)
utils.JAVA_XMX = utils.JAVA_XMX + args.java_max_mem
makedirs([args.log_dir, args.out_dir])
# Setup logging
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
loglevel = get_loglevel(args.loglevel)
if not args.log_to_stderr:
logging.basicConfig(filename=os.path.join(args.log_dir, "varsim.log"), filemode="w", level=loglevel, format=FORMAT)
else:
logging.basicConfig(level=loglevel, format=FORMAT)
simulator = None if args.disable_sim else args.simulator
simulator_opts = ""
if args.simulator == "dwgsim":
simulator_opts = "-e {1},{2} -E {1},{2} -d {3} -s {4} -1 {5} -2 {5} {6}".format(args.dwgsim_start_e, args.dwgsim_end_e, args.mean_fragment_size, args.sd_fragment_size, args.read_length, args.dwgsim_options)
elif args.simulator == "art":
profile_opts = "-1 {} -2 {}".format(args.profile_1, args.profile_2) if (args.profile_1 and args.profile_2) else ""
simulator_opts = "-p -l {} -m {} -s {} {} {}".format(args.read_length, args.mean_fragment_size, args.sd_fragment_size, profile_opts, args.art_options)
elif args.simulator == "longislnd":
simulator_opts = args.longislnd_options
elif args.simulator == "pbsim":
raise NotImplementedError("pbsim is no longer supported")
randvcf_options = None if args.disable_rand_vcf else RandVCFOptions(args.vc_num_snp, args.vc_num_ins, args.vc_num_del, args.vc_num_mnp, args.vc_num_complex, args.vc_percent_novel, args.vc_min_length_lim, args.vc_max_length_lim, args.vc_prop_het)
randdgv_options = None if args.disable_rand_dgv else RandDGVOptions(args.sv_num_ins, args.sv_num_del, args.sv_num_dup, args.sv_num_inv, args.sv_percent_novel, args.sv_min_length_lim, args.sv_max_length_lim, args.sv_prop_het)
logger = logging.getLogger()
logger.info(str(args))
varsim_main(args.reference,
simulator,
args.simulator_executable,
args.total_coverage,
variant_vcfs=args.vcfs,
sampling_vcf=args.vc_in_vcf,
dgv_file=args.sv_dgv,
randvcf_options=randvcf_options,
randdgv_options=randdgv_options,
nlanes=args.nlanes,
simulator_options=simulator_opts,
sample_id=args.id,
log_dir=args.log_dir,
out_dir=args.out_dir,
sv_insert_seq=args.sv_insert_seq,
seed=args.seed,
sex=args.sex,
remove_filtered=args.filter,
keep_temp=args.keep_temp,
force_five_base_encoding=args.force_five_base_encoding,
lift_ref=args.lift_ref,
disable_vcf2diploid=args.disable_vcf2diploid,
java=args.java)
|
|
"""The tests for the Rfxtrx light platform."""
import unittest
from homeassistant.bootstrap import _setup_component
from homeassistant.components import rfxtrx as rfxtrx_core
from tests.common import get_test_home_assistant
class TestLightRfxtrx(unittest.TestCase):
"""Test the Rfxtrx light platform."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant(0)
self.hass.config.components = ['rfxtrx']
def tearDown(self):
"""Stop everything that was started."""
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx_core.RFX_DEVICES = {}
if rfxtrx_core.RFXOBJECT:
rfxtrx_core.RFXOBJECT.close_connection()
self.hass.stop()
def test_valid_config(self):
"""Test configuration."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'0b1100cd0213c7f210010f51': {
'name': 'Test',
rfxtrx_core.ATTR_FIREEVENT: True}}}}))
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'213c7f216': {
'name': 'Test',
'packetid': '0b1100cd0213c7f210010f51',
'signal_repetitions': 3}}}}))
def test_invalid_config(self):
"""Test configuration."""
self.assertFalse(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'automatic_add': True,
'invalid_key': 'afda',
'devices':
{'213c7f216': {
'name': 'Test',
'packetid': '0b1100cd0213c7f210010f51',
rfxtrx_core.ATTR_FIREEVENT: True}}}}))
def test_default_config(self):
"""Test with 0 switches."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'devices': {}}}))
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
def test_old_config(self):
"""Test with 1 light."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'devices':
{'123efab1': {
'name': 'Test',
'packetid': '0b1100cd0213c7f210010f51'}}}}))
import RFXtrx as rfxtrxmod
rfxtrx_core.RFXOBJECT =\
rfxtrxmod.Core("", transport_protocol=rfxtrxmod.DummyTransport)
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
entity = rfxtrx_core.RFX_DEVICES['213c7f216']
self.assertEqual('Test', entity.name)
self.assertEqual('off', entity.state)
self.assertTrue(entity.assumed_state)
self.assertEqual(entity.signal_repetitions, 1)
self.assertFalse(entity.should_fire_event)
self.assertFalse(entity.should_poll)
self.assertFalse(entity.is_on)
entity.turn_on()
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 255)
entity.turn_off()
self.assertFalse(entity.is_on)
self.assertEqual(entity.brightness, 0)
entity.turn_on(brightness=100)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 100)
entity.turn_on(brightness=10)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 10)
entity.turn_on(brightness=255)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 255)
def test_one_light(self):
"""Test with 1 light."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'devices':
{'0b1100cd0213c7f210010f51': {
'name': 'Test'}}}}))
import RFXtrx as rfxtrxmod
rfxtrx_core.RFXOBJECT =\
rfxtrxmod.Core("", transport_protocol=rfxtrxmod.DummyTransport)
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
entity = rfxtrx_core.RFX_DEVICES['213c7f216']
self.assertEqual('Test', entity.name)
self.assertEqual('off', entity.state)
self.assertTrue(entity.assumed_state)
self.assertEqual(entity.signal_repetitions, 1)
self.assertFalse(entity.should_fire_event)
self.assertFalse(entity.should_poll)
self.assertFalse(entity.is_on)
entity.turn_on()
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 255)
entity.turn_off()
self.assertFalse(entity.is_on)
self.assertEqual(entity.brightness, 0)
entity.turn_on(brightness=100)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 100)
entity.turn_on(brightness=10)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 10)
entity.turn_on(brightness=255)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 255)
def test_several_lights(self):
"""Test with 3 lights."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'signal_repetitions': 3,
'devices':
{'0b1100cd0213c7f230010f71': {
'name': 'Test'},
'0b1100100118cdea02010f70': {
'name': 'Bath'},
'0b1100101118cdea02010f70': {
'name': 'Living'}}}}))
self.assertEqual(3, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
entity = rfxtrx_core.RFX_DEVICES[id]
self.assertEqual(entity.signal_repetitions, 3)
if entity.name == 'Living':
device_num = device_num + 1
self.assertEqual('off', entity.state)
self.assertEqual('<Entity Living: off>', entity.__str__())
elif entity.name == 'Bath':
device_num = device_num + 1
self.assertEqual('off', entity.state)
self.assertEqual('<Entity Bath: off>', entity.__str__())
elif entity.name == 'Test':
device_num = device_num + 1
self.assertEqual('off', entity.state)
self.assertEqual('<Entity Test: off>', entity.__str__())
self.assertEqual(3, device_num)
def test_discover_light(self):
"""Test with discovery of lights."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'automatic_add': True,
'devices': {}}}))
event = rfxtrx_core.get_rfx_object('0b11009e00e6116202020070')
event.data = bytearray(b'\x0b\x11\x00\x9e\x00\xe6\x11b\x02\x02\x00p')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES['0e611622']
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual('<Entity 0b11009e00e6116202020070: on>',
entity.__str__())
event = rfxtrx_core.get_rfx_object('0b11009e00e6116201010070')
event.data = bytearray(b'\x0b\x11\x00\x9e\x00\xe6\x11b\x01\x01\x00p')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0b1100120118cdea02020070')
event.data = bytearray([0x0b, 0x11, 0x00, 0x12, 0x01, 0x18,
0xcd, 0xea, 0x02, 0x02, 0x00, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES['118cdea2']
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual('<Entity 0b1100120118cdea02020070: on>',
entity.__str__())
# trying to add a sensor
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
# trying to add a switch
event = rfxtrx_core.get_rfx_object('0b1100100118cdea02010f70')
event.data = bytearray([0x0b, 0x11, 0x00, 0x10, 0x01, 0x18,
0xcd, 0xea, 0x01, 0x01, 0x0f, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
def test_discover_light_noautoadd(self):
"""Test with discover of light when auto add is False."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'automatic_add': False,
'devices': {}}}))
event = rfxtrx_core.get_rfx_object('0b1100120118cdea02020070')
event.data = bytearray([0x0b, 0x11, 0x00, 0x12, 0x01, 0x18,
0xcd, 0xea, 0x02, 0x02, 0x00, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0b1100120118cdea02010070')
event.data = bytearray([0x0b, 0x11, 0x00, 0x12, 0x01, 0x18,
0xcd, 0xea, 0x02, 0x01, 0x00, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0b1100120118cdea02020070')
event.data = bytearray([0x0b, 0x11, 0x00, 0x12, 0x01, 0x18,
0xcd, 0xea, 0x02, 0x02, 0x00, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
# Trying to add a sensor
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
# Trying to add a switch
event = rfxtrx_core.get_rfx_object('0b1100100118cdea02010f70')
event.data = bytearray([0x0b, 0x11, 0x00, 0x10, 0x01, 0x18,
0xcd, 0xea, 0x01, 0x01, 0x0f, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
|
|
"""Probabilistic potentials"""
__author__ = 'thor'
import pandas as pd
from numpy import *
from matplotlib.pyplot import *
import numpy as np
from collections import Counter
# from numpy.random import rand
# from numpy.random import permutation
import ut.pcoll.order_conserving as colloc
# import ut as ut
from ut.daf.op import cartesian_product
from ut.daf.gr import group_and_count
from ut.daf.ch import ch_col_names
from ut.daf.manip import reorder_columns_as
from ut.util.ulist import ascertain_list
from ut.util.prand import rand_numbers_summing_to_one
from ut.pplot.color import shifted_color_map
import ut.pplot.get
from functools import reduce
class Pot(object):
def __init__(self, data=None):
if isinstance(data, Pot):
self.tb = data.tb
elif isinstance(data, float) or isinstance(data, int):
self.tb = pd.DataFrame([{'pval': data}])
elif data is not None:
if isinstance(data, pd.DataFrame):
# inject the dataframe in the tb attribute: It's the potential data
assert 'pval' in data.columns, "dataframe had no pval column"
self.tb = data
elif isinstance(data, dict):
if 'pval' not in list(data.keys()):
data = dict(data, pval=len(data[list(data.keys())[0]]) * [1])
self.tb = pd.DataFrame(data=data)
else:
try:
self.tb = data.tb.copy()
except Exception:
raise ValueError("Unknown construction type")
else:
self.tb = pd.DataFrame({'pval': 1}, index=['']) # default "unit" potential
self.tb.index = [''] * len(self.tb)
def vars(self):
return colloc.setdiff(list(self.tb.columns), ['pval'])
###########################################
# OPERATIONS
###########################################
def get_slice(self, intercept_dict):
"""
Return sub-pot going through specific "intercept points"
For example, if X is a pot on ABC, then X.get_slice({'A':0, 'B':1}) is the pot on C taken from ABC where
A=0 and B=1.
It's like a subplane of points defined by given axis intercepts.
"""
tb = self.tb.copy()
for k, v in intercept_dict.items():
tb = tb[tb[k] == v]
del tb[k]
return Pot(tb)
def project_to(self, var_list=[]):
"""
project to a subset of variables (marginalize out other variables)
"""
var_list = colloc.intersect(ascertain_list(var_list), self.vars())
if var_list: # if non-empty, marginalize out other variables
return Pot(self.tb[var_list + ['pval']].groupby(var_list).sum().reset_index())
else:  # if var_list is empty, return a singleton potential containing the sum of the vals of self.tb
return Pot(pd.DataFrame({'pval': self.tb['pval'].sum()}, index=['']))
def __rshift__(self, var_list):
return self.project_to(var_list)
def normalize(self, var_list=[]):
"""
'Normalization' of the pot with respect to var_list.
Will define the pot by the projection of the pot on a subset of the variables.
Note: If this subset is the empty set, this will correspond to "full normalization", i.e. dividing the vals by
the sum of all vals.
Use:
* This can be used to transform a count potential into a probability potential
(if your sample is large enough!)
* Conditional Probability: P(A|B) = P(AB) / P(B)
"""
return self / self.project_to(var_list)
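# Illustrative usage (hypothetical variable names A and B):
#   p_ab = Pot({'A': [0, 0, 1, 1], 'B': [0, 1, 0, 1], 'pval': [2, 1, 3, 4]})
#   p_a_given_b = p_ab.normalize(['B'])  # P(A|B): divide P(A,B) by its projection P(B)
#   p_full = p_ab.normalize([])          # full normalization: pvals sum to 1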
def __or__(self, item):
"""
If item is empty/none/false, a string or a list, it normalizes according to item.
If item is a dict, it normalizes according to the keys, and slices according to the dict.
--> This resembles P(A|B=1) kind of thing...
"""
print("I'm trying to discourage using | now (might want to use it for fuzzy logic at some point")
print("--> Use / instead of |. ")
if isinstance(item, str):
return self / self.project_to([item])
elif isinstance(item, list):
return self / self.project_to(item)
elif isinstance(item, dict):
intercept_dict = item
var_list = colloc.intersect(self.vars(), list(intercept_dict.keys()))
return (self / self.project_to(var_list)).get_slice(intercept_dict)
else:
raise TypeError('Unknown item type')
def __getitem__(self, item):
"""
This function is called when accessing the pot with [] brackets, and will return a slice of projection of the
pot depending on the type of item.
"""
if item:
if isinstance(item, dict):
return self.get_slice(item)
elif isinstance(item, list):
return self.project_to(item)
elif isinstance(item, str):
return self.project_to(item)
else:
raise TypeError("Unknown type for item (must be None, dict, list, or string)")
else:
return Pot(pd.DataFrame({'pval': self.tb['pval'].sum()}, index=['']))
def __add__(self, pot):
return Pot(_val_add_(self._merge_(pot)))
# if isinstance(y, float) | isinstance(y, int):
# self.tb['pval'] += y
# else:
# pass
def __mul__(self, pot):
"""
Multiply two potentials
"""
return Pot(_val_prod_(self._merge_(pot)))
def __div__(self, item):
"""
Operation depends on what item's type is. If item is a:
Pot: perform potential division (like multiplication but with pvals divided).
empty/none/false, a string or a list: normalize according to item.
dict: it normalizes according to the keys, and slices according to the dict.
--> This resembles P(A|B=1) kind of thing...
"""
if isinstance(item, Pot):
return Pot(_val_div_(self._merge_(item)))
elif isinstance(item, str):
return self.normalize([item])
elif isinstance(item, list):
return self.normalize(item)
elif isinstance(item, dict):
intercept_dict = item
var_list = colloc.intersect(self.vars(), list(intercept_dict.keys()))
return self.normalize(var_list).get_slice(intercept_dict)
else:
raise TypeError('Unknown item type')
def __truediv__(self, item):
return self.__div__(item)
def assimilate(self, pot):
"""
Assimilate information given by input pot (returning the result).
Assimilation means multiplication followed by a projection to the original variables.
This is used, for example, when wanting to compute P(X|D=data) as the normalization of P(D=data|X) * P(X)
(Bayes rule). We can write that as P(X) absorbing P(D=data|X). The result has the dimensions of X.
"""
return self.__mul__(pot).normalize([]).project_to(self.vars())
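# Illustrative (hypothetical) Bayes-rule usage: if prior is a Pot on X and
# likelihood is a Pot on X holding P(D=data|X), then
#   posterior = prior.assimilate(likelihood)
# is proportional to P(D=data|X) * P(X), renormalized and projected back to X.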
def unassimilate(self, pot):
"""
Inverse of assimilate.
"""
return self.__div__(pot).normalize([]).project_to(self.vars())
###########################################
# Usable UTILS
###########################################
def order_vars(self, var_list, sort_pts=True):
self.tb = reorder_columns_as(self.tb, ascertain_list(var_list))
if sort_pts:
self.sort_pts()
return self
def sort_pts(self, var_list=None, **kwargs):
var_list = var_list or self.vars()
self.tb = self.tb.sort_values(by=var_list, **kwargs)
return self
def pval(self):
return self.tb.pval
def pval_of(self, var_val_dict, default_val=0.0):
t = self.get_slice(var_val_dict)
n = len(t.tb)
if n == 0:
return default_val
elif n == 1:
return t.tb.pval[0]
else:
raise RuntimeError("In pval_of(): get_slice returned more than one value")
def binarize(self, var_values_to_map_to_1_dict):
"""
maps specified variables to {0, 1}
var_values_to_map_to_1_dict is a {variable_name: values to map to 1} specification dict
"""
tb = self.tb.copy()  # copy once, so binarization of several variables accumulates
for var_name, vals_to_map_to_1 in var_values_to_map_to_1_dict.items():
if not hasattr(vals_to_map_to_1, '__iter__'):
vals_to_map_to_1 = [vals_to_map_to_1]
lidx = tb[var_name].isin(vals_to_map_to_1)
tb[var_name] = 0
tb[var_name].loc[lidx] = 1
tb = tb.groupby(self.vars()).sum().reset_index(drop=False)
return Pot(tb)
def round(self, ndigits=None, inplace=False):
if ndigits is None:
ndigits = abs(int(np.log10(self.tb['pval'].min()))) + 1 + 2
print(ndigits)
rounded_pvals = [round(x, ndigits) for x in self.tb['pval']]
if inplace:
self.tb['pval'] = rounded_pvals
else:
x = Pot(self)
x.tb['pval'] = rounded_pvals
return x
def rect_perspective_df(self):
vars = self.vars()
assert len(self.vars()) == 2, "You can only get the rect_perspective_df of a pot with exactly two variables"
return self.tb.set_index([vars[0], vars[1]]).unstack(vars[1])['pval']
###########################################
# Hidden UTILS
###########################################
def _merge_(self, pot):
"""
Util function. Shouldn't really be used directly by the user.
Merge (join) two pots.
An inner merge of the two pots, on the intersection of their variables (if non-empty) will be performed,
producing pval_x and pval_y columns that will contain the original left and right values, aligned with the join.
Note: If the vars intersection is empty, the join will correspond to the cartesian product of the variables.
"""
on = colloc.intersect(self.vars(), pot.vars()) # we will merge on the intersection of the variables (not pval)
if on:
return pd.merge(self.tb, pot.tb, how='inner', on=on, sort=True, suffixes=('_x', '_y'))
else: # if no common variables, take the cartesian product
return cartesian_product(self.tb, pot.tb)
def __str__(self):
"""
This will return a string that represents the underlying dataframe (used when printing the pot)
"""
return self.tb.__repr__()
def __repr__(self):
"""
This is used by iPython to display a variable.
I chose to do things differently than __str__.
Here the dataframe is indexed by the vars and then made into a string.
This provides a hierarchical progression perspective to the variable combinations.
"""
if self.vars():
return self.tb.set_index(self.vars()).__str__()
else:
return self.tb.__repr__()
#def assert_pot_validity(self):
# assert 'pval' in self.tb.columns, "the potential dataframe has no column named 'pval'"
# assert len(self.tb.)
#################################################################################
# FACTORIES
@classmethod
def binary_pot(cls, varname, prob=1):
return Pot(pd.DataFrame({varname: [0, 1], 'pval': [1 - prob, prob]}))
@classmethod
def from_points_to_count(cls, pts, vars=None):
"""
By "points" I mean a collection (through some data structure) of multi-dimensional coordinates.
By default, all unique points will be grouped and the pval will be the cardinality of each group.
"""
if isinstance(pts, pd.DataFrame):
# tb = group_and_count(pts)
# tb = ch_col_names(tb, 'pval', 'count')
return Pot(group_and_count(pts, count_col='pval'))
else:
counts = Counter(pts)
if vars is None:
example_key = list(counts.keys())[0]
vars = list(range(len(example_key)))
return Pot(pd.DataFrame(
[dict(pval=v, **{kk: vv for kk, vv in zip(vars, k)}) for k, v in counts.items()])
)
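# Illustrative usage (hypothetical data): counting 2-D points given as tuples,
#   Pot.from_points_to_count([(0, 1), (0, 1), (1, 1)], vars=['A', 'B'])
# yields a pot on A, B with pval 2 for the point (0, 1) and pval 1 for (1, 1).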
@classmethod
def from_count_df_to_count(cls, count_df, count_col='pval'):
"""
Creates a potential from a dataframe specifying point counts (where the count column name is specified by
count_col).
"""
pot_vars = list(colloc.setdiff(count_df.columns, [count_col]))
tb = count_df[pot_vars+[count_col]].groupby(pot_vars).sum().reset_index()
tb = ch_col_names(tb, 'pval', count_col)
return Pot(tb)
@classmethod
def from_points_to_bins(cls, pts, **kwargs):
"""
Creates a count potential from a dataframe of points by grouping identical rows
(the group counts become the pval column).
"""
if isinstance(pts, pd.DataFrame):
tb = group_and_count(pts)
tb = ch_col_names(tb, 'pval', 'count')
return Pot(tb)
@classmethod
def rand(cls, n_var_vals=[2, 2], var_names=None, granularity=None, try_to_get_unique_values=False):
# check inputs
assert len(n_var_vals) <= 26, "You can't request more than 26 variables: That's just crazy"
if var_names is None:
var_names = [str(chr(x)) for x in range(ord('A'), ord('Z') + 1)]
assert len(n_var_vals) <= len(var_names), "You can't have fewer var_names than you have n_var_vals"
assert min(array(n_var_vals)) >= 2, "n_var_vals elements should be >= 2"
# make the df by taking the cartesian product of the n_var_vals defined ranges
df = reduce(cartesian_product, [pd.DataFrame(data=list(range(x)), columns=[y]) for x, y in zip(n_var_vals, var_names)])
n_vals = len(df)
def _get_random_pvals():
if granularity is None:
if n_vals > 18:
x = np.random.rand(n_vals)
return x / sum(x)
elif n_vals == 4:
return np.random.permutation([0.1, 0.2, 0.3, 0.4])
else:
if n_vals <= 12:
return rand_numbers_summing_to_one(n_vals, 0.05)
else:
return rand_numbers_summing_to_one(n_vals, 0.01)
else:
return rand_numbers_summing_to_one(n_vals, granularity)
# choose random vals
if try_to_get_unique_values:
if not isinstance(try_to_get_unique_values, int):
try_to_get_unique_values = 1000
for i in range(try_to_get_unique_values):
pvals = _get_random_pvals()
if len(unique(pvals)) == n_vals:
break
else:
pvals = _get_random_pvals()
df['pval'] = list(map(float, pvals))
return Pot(df)
class ProbPot(Pot):
def __init__(self, data=None):
super(ProbPot, self).__init__(data=data)
def prob_of(self, var_val_dict):
t = self.get_slice(var_val_dict)
n = len(t.tb)
if n == 0:
return 0.0
elif n == 1:
return t.tb.pval[0]
else:
raise RuntimeError("In prob_of(): get_slice returned more than one value")
def given(self, conditional_vars):
return ProbPot(self.__div__(conditional_vars))
def relative_risk(self, event_var, exposure_var, event_val=1, exposed_val=1):
prob = self >> [event_var, exposure_var]
prob = prob.binarize({event_var: event_val, exposure_var: exposed_val})
return (prob / {exposure_var: 1})[{event_var: 1}] \
/ (prob / {exposure_var: 0})[{event_var: 1}]
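# Reading of the expression above: relative risk is
#   P(event=1 | exposure=1) / P(event=1 | exposure=0)
# after binarizing both variables against the given "positive" values.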
@staticmethod
def plot_relrisk_matrix(relrisk):
t = relrisk.copy()
matrix_shape = (t['exposure'].nunique(), t['event'].nunique())
m = ut.daf.to.map_vals_to_ints_inplace(t, cols_to_map=['exposure'])
m = m['exposure']
ut.daf.to.map_vals_to_ints_inplace(t, cols_to_map={'event': dict(list(zip(m, list(range(len(m))))))})
RR = zeros(matrix_shape)
RR[t['exposure'], t['event']] = t['relative_risk']
RR[list(range(len(m))), list(range(len(m)))] = nan
RRL = np.log2(RR)
def normalizor(X):
min_x = nanmin(X)
range_x = nanmax(X) - min_x
return lambda x: (x - min_x) / range_x
normalize_this = normalizor(RRL)
center = normalize_this(0)
color_map = shifted_color_map(cmap=cm.get_cmap('coolwarm'), start=0, midpoint=center, stop=1)
imshow(RRL, cmap=color_map, interpolation='none');
xticks(list(range(shape(RRL)[0])), m, rotation=90)
yticks(list(range(shape(RRL)[1])), m)
cbar = colorbar()
cbar.ax.set_yticklabels(["%.02f" % x for x in np.exp2(array(ut.pplot.get.get_colorbar_tick_labels_as_floats(cbar)))])
#
#
# class ValPot(Pot):
# def __init__(self, **kwargs):
# super(ValPot, self).__init__(**kwargs)
##### Data Prep utils
def from_points_to_binary(d, mid_fun=median):
dd = d.copy()
columns = d.columns
for c in columns:
dd[c] = list(map(int, d[c] > mid_fun(d[c])))
return dd
##### Other utils
def relative_risk(joint_prob_pot, event_var, exposure_var):
prob = joint_prob_pot >> [event_var, exposure_var]
return (prob / {exposure_var: 1})[{event_var: 1}] \
/ (prob / {exposure_var: 0})[{event_var: 1}]
def _val_prod_(tb):
"""
multiplies columns pval_x and pval_y, creating column pval (and removing pval_x and pval_y)
"""
tb['pval'] = tb['pval_x'] * tb['pval_y']
tb.drop(labels=['pval_x', 'pval_y'], axis=1, inplace=True)
return tb
def _val_div_(tb):
"""
divides column pval_x by pval_y, creating column pval (and removing pval_x and pval_y)
Note: 0/0 will be equal to 0
"""
tb['pval'] = np.true_divide(tb['pval_x'], tb['pval_y']).fillna(0)
tb.drop(labels=['pval_x', 'pval_y'], axis=1, inplace=True)
return tb
def _val_add_(tb):
"""
adds columns pval_x and pval_y, creating column pval (and removing pval_x and pval_y)
"""
tb['pval'] = tb['pval_x'] + tb['pval_y']
tb.drop(labels=['pval_x', 'pval_y'], axis=1, inplace=True)
return tb
|
|
import time
from BCBio import GFF
from django.db import connection
from edge.models import Fragment, Fragment_Chunk_Location
def circular_mod(number, seq_length):
return ((number - 1) % seq_length) + 1
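# Maps a (possibly out-of-range) 1-based position onto a circular sequence of
# the given length, e.g. circular_mod(5, 4) == 1 and circular_mod(4, 4) == 4.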
class GFFImporter(object):
def __init__(self, genome, gff_fasta_fn):
self.__genome = genome
self.__gff_fasta_fn = gff_fasta_fn
def do_import(self):
in_file = self.__gff_fasta_fn
in_handle = open(in_file)
# In DEBUG=True mode, Django keeps list of queries and blows up memory
# usage when doing a big import. The following line disables this
# logging.
connection.use_debug_cursor = False
for rec in GFF.parse(in_handle):
if self.__genome.fragments.filter(name=rec.id).count() > 0:
print("skipping %s, already imported" % rec.id)
else:
f = GFFFragmentImporter(rec).do_import()
self.__genome.genome_fragment_set.create(fragment=f, inherited=False)
# Be nice and turn debug cursor back on
connection.use_debug_cursor = True
in_handle.close()
class GFFFragmentImporter(object):
def __init__(self, gff_rec):
self.__rec = gff_rec
self.__sequence = None
self.__features = None
self.__fclocs = None
self.__subfeatures_dict = {}
def do_import(self):
self.parse_gff()
t0 = time.time()
f = self.build_fragment()
print("build fragment: %.4f" % (time.time() - t0,))
t0 = time.time()
self.annotate(f)
print("annotate: %.4f" % (time.time() - t0,))
return f
def parse_gff(self):
name_fields = (
"name",
"Name",
"gene",
"locus",
"locus_tag",
"product",
"protein_id",
)
self.__sequence = str(self.__rec.seq)
seqlen = len(self.__sequence)
print("%s: %s" % (self.__rec.id, seqlen))
features = []
for feature in self.__rec.features:
# skip features that cover the entire sequence
if feature.type.upper() in ['REGION', 'CHR', 'CHROM', 'CHROMOSOME']:
continue
# get name
name = feature.id
if name == "":
name = feature.type
for field in name_fields:
if field in feature.qualifiers:
v = feature.qualifiers[field]
if len(v) > 0:
name = v[0]
break
name = name[0:100]
# get qualifiers
qualifiers = {}
for field in feature.qualifiers:
v = feature.qualifiers[field]
if len(v) > 0:
qualifiers[field] = v
# BioPython/GFF locations use 0-based starts, so +1 to convert to 1-based coordinates
features.append(
(
circular_mod(int(feature.location.start) + 1, seqlen),
circular_mod(int(feature.location.end), seqlen),
name,
feature.type,
feature.strand,
qualifiers,
)
)
feature_name = name
# collect sub features (CDS/exon/intron/RNA) used for chunking
self.__subfeatures_dict[feature_name] = []
# order based on relative position in the feature
first, second = [], []
for sub in sorted(feature.sub_features, key=lambda f: int(f.location.start)):
if circular_mod(int(sub.location.start) + 1, seqlen) < features[-1][0]:
second.append(sub)
else:
first.append(sub)
sub_feats_to_iter = first + second
for sub in sub_feats_to_iter:
# change name for sub feature
subfeature_name = ''
for field in name_fields:
if field in sub.qualifiers:
v = sub.qualifiers[field]
if len(v) > 0:
subfeature_name = v[0]
break
subfeature_name = subfeature_name[0:100]
if subfeature_name == '':
if sub.id != '':
subfeature_name = sub.id
else:
subfeature_name = feature_name
# check that the type is right
if sub.type.upper() in ['CDS', 'EXON', 'INTRON'] or sub.type.upper()[-3:] == 'RNA':
qualifiers = {}
for field in sub.qualifiers:
v = sub.qualifiers[field]
if len(v) > 0:
qualifiers[field] = v
sub_tup = (
circular_mod(int(sub.location.start) + 1, seqlen),
circular_mod(int(sub.location.end), seqlen),
subfeature_name,
sub.type,
sub.strand,
qualifiers,
)
# if it has no id, it belongs to the feature
# otherwise, mark it as its own feature
if subfeature_name == feature_name:
self.__subfeatures_dict[feature_name].append(sub_tup)
else:
if subfeature_name in self.__subfeatures_dict:
self.__subfeatures_dict[subfeature_name].append(sub_tup)
else:
features.append(sub_tup)
self.__subfeatures_dict[subfeature_name] = [sub_tup]
first, second = [], []
for sub_sub in sorted(sub.sub_features, key=lambda f: int(f.location.start)):
if circular_mod(int(sub_sub.location.start) + 1, seqlen) < features[-1][0]:
second.append(sub_sub)
else:
first.append(sub_sub)
sub_sub_feats_to_iter = first + second
for sub_sub in sub_sub_feats_to_iter:
# change name for sub sub feature
subsubfeature_name = ''
for field in name_fields:
if field in sub_sub.qualifiers:
v = sub_sub.qualifiers[field]
if len(v) > 0:
subsubfeature_name = v[0]
break
subsubfeature_name = subsubfeature_name[0:100]
if subsubfeature_name == '':
if sub_sub.id != '':
subsubfeature_name = sub_sub.id
else:
subsubfeature_name = subfeature_name
qualifiers = {}
for field in feature.qualifiers:
v = feature.qualifiers[field]
if len(v) > 0:
qualifiers[field] = v
sub_sub_tup = (
circular_mod(int(sub_sub.location.start) + 1, seqlen),
circular_mod(int(sub_sub.location.end), seqlen),
subsubfeature_name,
sub_sub.type,
sub_sub.strand,
qualifiers,
)
# if it has no id and the sub feature has no id, it belongs to the feature
# if it has no id and the sub feature has id, it belongs to the sub feature
# otherwise, mark it as its own feature
if subsubfeature_name == feature_name:
self.__subfeatures_dict[feature_name].append(sub_sub_tup)
elif subsubfeature_name == subfeature_name:
self.__subfeatures_dict[subfeature_name].append(sub_sub_tup)
else:
if subsubfeature_name in self.__subfeatures_dict:
self.__subfeatures_dict[subsubfeature_name].append(sub_sub_tup)
else:
features.append(sub_sub_tup)
self.__subfeatures_dict[subsubfeature_name] = [sub_sub_tup]
self.__features = features
# update features made from only subfeatures
for feature in list(features):  # iterate over a copy, since features is modified below
if self.__subfeatures_dict[feature[2]] != []:
features.remove(feature)
new_start = self.__subfeatures_dict[feature[2]][0][0]
new_end = self.__subfeatures_dict[feature[2]][-1][1]
new_feature = (new_start, new_end, feature[2], feature[3], feature[4], feature[5])
features.append(new_feature)
def build_fragment(self):
# pre-chunk the fragment sequence at feature start and end locations.
# there should be no need to further divide any chunk during import.
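# For example (ignoring subfeatures), features spanning bases 5-10 and 8-12 on a
# 20 bp sequence give break points [5, 8, 11, 13] and chunk sizes [4, 3, 3, 2, 8].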
starts_and_ends = []
for feature in self.__features:
name = feature[2]
starts_and_ends.append(feature[0])
starts_and_ends.append(feature[1] + 1)
for subfeature in self.__subfeatures_dict[name]:
starts_and_ends.append(subfeature[0])
starts_and_ends.append(subfeature[1] + 1)
break_points = sorted(list(set(starts_and_ends)))
cur_len = 0
chunk_sizes = []
seq_len = len(self.__sequence)
for i, bp in enumerate(break_points):
if i == 0:
if bp > 1:
chunk_sizes.append(break_points[i] - 1)
cur_len += chunk_sizes[-1]
else:
chunk_sizes.append(break_points[i] - break_points[i - 1])
cur_len += chunk_sizes[-1]
if cur_len < seq_len:
chunk_sizes.append(seq_len - cur_len)
fragment_circular = False
for feature in self.__rec.features:
# region-level features that cover the entire sequence may carry the Is_circular qualifier
if feature.type.upper() in ['REGION', 'CHR', 'CHROM', 'CHROMOSOME']:
if 'Is_circular' in feature.qualifiers:
fragment_circular = feature.qualifiers['Is_circular'][0].upper() == 'TRUE'
break
new_fragment = Fragment(
name=self.__rec.id, circular=fragment_circular, parent=None, start_chunk=None
)
new_fragment.save()
new_fragment = new_fragment.indexed_fragment()
# divide chunks bigger than a certain threshold to smaller chunks, to
# allow insertion of sequence into database. e.g. MySQL has a packet
# size that prevents chunks that are too large from being inserted.
chunk_size_limit = 1000000
new_chunk_sizes = []
for original_chunk_size in chunk_sizes:
if original_chunk_size < chunk_size_limit:
new_chunk_sizes.append(original_chunk_size)
else:
divided_chunks = []
while original_chunk_size > 0:
divided_chunks.append(min(original_chunk_size, chunk_size_limit))
original_chunk_size -= chunk_size_limit
new_chunk_sizes.extend(divided_chunks)
chunk_sizes = new_chunk_sizes
print("%d chunks" % (len(chunk_sizes),))
prev = None
fragment_len = 0
for chunk_size in chunk_sizes:
t0 = time.time()
prev = new_fragment._append_to_fragment(
prev,
fragment_len,
self.__sequence[fragment_len : fragment_len + chunk_size],
)
fragment_len += chunk_size
print("add chunk to fragment: %.4f\r" % (time.time() - t0,), end="")
return new_fragment
def annotate(self, fragment):
self.__fclocs = {
c.base_first: c
for c in Fragment_Chunk_Location.objects.select_related("chunk").filter(
fragment=fragment
)
}
for feature in self.__features:
t0 = time.time()
f_start, f_end, f_name, f_type, f_strand, f_qualifiers = feature
# print(' %s %s: %s-%s %s' % (f_type, f_name, f_start, f_end, f_strand))
if self.__subfeatures_dict[f_name] != []:
f_qualifiers['subfeature_qualifiers'] = {
f"{sf[0]}_{sf[1]}": sf[5] for sf in self.__subfeatures_dict[f_name]
if sf[0] != f_start and sf[1] != f_end
}
new_feature = self._annotate_feature(
fragment, f_start, f_end, f_name, f_type, f_strand, f_qualifiers
)
if new_feature is None:
continue
feature_base_first = 1
sorted_subfeatures = self.__subfeatures_dict[f_name]
if f_strand == -1:
sorted_subfeatures.reverse()
for subfeature in sorted_subfeatures:
sf_start, sf_end, sf_name, sf_type, sf_strand, sf_qualifiers = subfeature
self._annotate_feature(
fragment, sf_start, sf_end, sf_name, sf_type, sf_strand, sf_qualifiers,
feature=new_feature, feature_base_first=feature_base_first
)
feature_base_first += sf_end - sf_start + 1
print("annotate feature: %.4f\r" % (time.time() - t0,), end="")
print("\nfinished annotating feature")
def _annotate_feature(
self, fragment, first_base1, last_base1, name, type, strand, qualifiers,
feature=None, feature_base_first=1
):
wrap_around = fragment.circular and last_base1 < first_base1
if wrap_around:
# feature wraps around the origin, so its length spans the end and the start of the sequence
length = len(self.__sequence) - first_base1 + 1 + last_base1
else:
length = last_base1 - first_base1 + 1
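# e.g. on a 10 bp circular fragment, a feature from base 8 to base 3 has
# length 10 - 8 + 1 + 3 = 6 (bases 8, 9, 10, 1, 2, 3).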
if length <= 0:
raise Exception("Annotation must have length one or more")
if first_base1 not in self.__fclocs or (
(last_base1 < len(self.__sequence) and last_base1 + 1 not in self.__fclocs)) or (
last_base1 > len(self.__sequence)
):
"""
raise Exception(
"Cannot find appropriate sequence for feature: %s, start %s, end %s"
% (name, first_base1, last_base1)
)
"""
# 11/03/2021 - ignoring features at end of GFF that went beyond the last bp
return None
bases = []
for key in self.__fclocs:
fcloc = self.__fclocs[key]
if fcloc.base_first >= first_base1 and fcloc.base_last <= last_base1:
bases.append((fcloc.base_first, fcloc.base_last))
elif wrap_around:
if fcloc.base_first >= first_base1 or fcloc.base_last <= last_base1:
bases.append((fcloc.base_first, fcloc.base_last))
sorting_strand = strand if strand is not None else 1
bases.sort(key=lambda x: (wrap_around * (x[1] <= last_base1), sorting_strand * x[0]))
length = 0
for first_base1, last_base1 in bases:
length += fragment.bp_covered_length(first_base1, last_base1)
new_feature = fragment._add_feature(
name, type, length, strand, qualifiers
) if feature is None else feature
if feature is not None or self.__subfeatures_dict[name] == []:
for first_base1, last_base1 in bases:
region_length = fragment.bp_covered_length(first_base1, last_base1)
fragment.annotate_chunk(
new_feature, feature_base_first, first_base1, last_base1
)
feature_base_first += region_length
return new_feature
|
|
"""The tests for the manual Alarm Control Panel component."""
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.setup import setup_component
from homeassistant.const import (
STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED)
from homeassistant.components import alarm_control_panel
import homeassistant.util.dt as dt_util
from tests.common import fire_time_changed, get_test_home_assistant
CODE = 'HELLO_CODE'
class TestAlarmControlPanelManual(unittest.TestCase):
"""Test the manual alarm module."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_arm_home_no_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_arm_home_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_arm_home_with_invalid_code(self):
"""Attempt to arm home without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_arm_away_no_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_arm_away_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_arm_away_with_invalid_code(self):
"""Attempt to arm away without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_no_pending(self):
"""Test triggering when no pending submitted method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=60)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
def test_trigger_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 2,
'trigger_time': 3,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_with_disarm_after_trigger(self):
"""Test disarm after trigger."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 5,
'pending_time': 0,
'disarm_after_trigger': True
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_disarm_while_pending_trigger(self):
"""Test disarming while pending state."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 5,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_disarm_during_trigger_with_invalid_code(self):
"""Test disarming while code is invalid."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 5,
'code': CODE + '2',
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
from http.client import BadStatusLine
except ImportError:
from httplib import BadStatusLine
import pytest
from selenium.common.exceptions import (
NoSuchElementException,
NoSuchFrameException,
TimeoutException,
WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class TestFrameSwitching(object):
# ----------------------------------------------------------------------------------------------
#
# Tests that WebDriver doesn't do anything fishy when it navigates to a page with frames.
#
# ----------------------------------------------------------------------------------------------
def testShouldAlwaysFocusOnTheTopMostFrameAfterANavigationEvent(self, driver, pages):
pages.load("frameset.html")
driver.find_element(By.TAG_NAME, "frameset") # Test passes if this does not throw.
def testShouldNotAutomaticallySwitchFocusToAnIFrameWhenAPageContainingThemIsLoaded(self, driver, pages):
pages.load("iframes.html")
driver.find_element(By.ID, "iframe_page_heading")
def testShouldOpenPageWithBrokenFrameset(self, driver, pages):
pages.load("framesetPage3.html")
frame1 = driver.find_element(By.ID, "first")
driver.switch_to.frame(frame1)
driver.switch_to.default_content()
frame2 = driver.find_element(By.ID, "second")
driver.switch_to.frame(frame2) # IE9 can not switch to this broken frame - it has no window.
# ----------------------------------------------------------------------------------------------
#
# Tests that WebDriver can switch to frames as expected.
#
# ----------------------------------------------------------------------------------------------
def testShouldBeAbleToSwitchToAFrameByItsIndex(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(1)
assert driver.find_element(By.ID, "pageNumber").text == "2"
def testShouldBeAbleToSwitchToAnIframeByItsIndex(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"
def testShouldBeAbleToSwitchToAFrameByItsName(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("fourth")
assert driver.find_element(By.TAG_NAME, "frame").get_attribute("name") == "child1"
def testShouldBeAbleToSwitchToAnIframeByItsName(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame("iframe1-name")
assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"
def testShouldBeAbleToSwitchToAFrameByItsID(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("fifth")
assert driver.find_element(By.NAME, "windowOne").text == "Open new window"
def testShouldBeAbleToSwitchToAnIframeByItsID(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame("iframe1")
assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"
def testShouldBeAbleToSwitchToFrameWithNameContainingDot(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("sixth.iframe1")
assert "Page number 3" in driver.find_element(By.TAG_NAME, "body").text
def testShouldBeAbleToSwitchToAFrameUsingAPreviouslyLocatedWebElement(self, driver, pages):
pages.load("frameset.html")
frame = driver.find_element(By.TAG_NAME, "frame")
driver.switch_to.frame(frame)
assert driver.find_element(By.ID, "pageNumber").text == "1"
def testShouldBeAbleToSwitchToAnIFrameUsingAPreviouslyLocatedWebElement(self, driver, pages):
pages.load("iframes.html")
frame = driver.find_element(By.TAG_NAME, "iframe")
driver.switch_to.frame(frame)
element = driver.find_element(By.NAME, "id-name1")
assert element.get_attribute("value") == "name"
def testShouldEnsureElementIsAFrameBeforeSwitching(self, driver, pages):
pages.load("frameset.html")
frame = driver.find_element(By.TAG_NAME, "frameset")
with pytest.raises(NoSuchFrameException):
driver.switch_to.frame(frame)
def testFrameSearchesShouldBeRelativeToTheCurrentlySelectedFrame(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("second")
assert driver.find_element(By.ID, "pageNumber").text == "2"
with pytest.raises(NoSuchElementException):
driver.switch_to.frame(driver.find_element_by_name("third"))
driver.switch_to.default_content()
driver.switch_to.frame(driver.find_element_by_name("third"))
with pytest.raises(NoSuchFrameException):
driver.switch_to.frame("second")
driver.switch_to.default_content()
driver.switch_to.frame(driver.find_element_by_name("second"))
assert driver.find_element(By.ID, "pageNumber").text == "2"
def testShouldSelectChildFramesByChainedCalls(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
driver.switch_to.frame(driver.find_element_by_name("child2"))
assert driver.find_element(By.ID, "pageNumber").text == "11"
def testShouldThrowFrameNotFoundExceptionLookingUpSubFramesWithSuperFrameNames(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
with pytest.raises(NoSuchElementException):
driver.switch_to.frame(driver.find_element_by_name("second"))
def testShouldThrowAnExceptionWhenAFrameCannotBeFound(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.switch_to.frame(driver.find_element_by_name("Nothing here"))
def testShouldThrowAnExceptionWhenAFrameCannotBeFoundByIndex(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchFrameException):
driver.switch_to.frame(27)
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFrame(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
driver.switch_to.parent_frame()
driver.switch_to.frame(driver.find_element_by_name("first"))
assert driver.find_element(By.ID, "pageNumber").text == "1"
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFrameFromASecondLevelFrame(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
driver.switch_to.frame(driver.find_element_by_name("child1"))
driver.switch_to.parent_frame()
driver.switch_to.frame(driver.find_element_by_name("child2"))
assert driver.find_element(By.ID, "pageNumber").text == "11"
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testSwitchingToParentFrameFromDefaultContextIsNoOp(self, driver, pages):
pages.load("xhtmlTest.html")
driver.switch_to.parent_frame()
assert driver.title == "XHTML Test Page"
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFromAnIframe(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
driver.switch_to.parent_frame()
driver.find_element(By.ID, "iframe_page_heading")
# ----------------------------------------------------------------------------------------------
#
# General frame handling behavior tests
#
# ----------------------------------------------------------------------------------------------
def testShouldContinueToReferToTheSameFrameOnceItHasBeenSelected(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(2)
checkbox = driver.find_element(By.XPATH, "//input[@name='checky']")
checkbox.click()
checkbox.submit()
# TODO(simon): this should not be needed, and is only here because IE's submit returns too
# soon.
WebDriverWait(driver, 3).until(EC.text_to_be_present_in_element((By.XPATH, '//p'), 'Success!'))
@pytest.mark.xfail_marionette(raises=TimeoutException)
def testShouldFocusOnTheReplacementWhenAFrameFollowsALinkToA_TopTargetedPage(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(0)
driver.find_element(By.LINK_TEXT, "top").click()
expectedTitle = "XHTML Test Page"
WebDriverWait(driver, 3).until(EC.title_is(expectedTitle))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "only-exists-on-xhtmltest")))
def testShouldAllowAUserToSwitchFromAnIframeBackToTheMainContentOfThePage(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
driver.switch_to.default_content()
driver.find_element(By.ID, "iframe_page_heading")
def testShouldAllowTheUserToSwitchToAnIFrameAndRemainFocusedOnIt(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
driver.find_element(By.ID, "submitButton").click()
assert self.getTextOfGreetingElement(driver) == "Success!"
def getTextOfGreetingElement(self, driver):
return WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "greeting"))).text
def testShouldBeAbleToClickInAFrame(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("third")
# This should replace frame "third" ...
driver.find_element(By.ID, "submitButton").click()
# driver should still be focused on frame "third" ...
assert self.getTextOfGreetingElement(driver) == "Success!"
# Make sure it was really frame "third" which was replaced ...
driver.switch_to.default_content()
driver.switch_to.frame("third")
assert self.getTextOfGreetingElement(driver) == "Success!"
def testShouldBeAbleToClickInAFrameThatRewritesTopWindowLocation(self, driver, pages):
pages.load("click_tests/issue5237.html")
driver.switch_to.frame(driver.find_element_by_id("search"))
driver.find_element(By.ID, "submit").click()
driver.switch_to.default_content()
WebDriverWait(driver, 3).until(EC.title_is("Target page for issue 5237"))
def testShouldBeAbleToClickInASubFrame(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_id("sixth"))
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
# This should replace frame "iframe1" inside frame "sixth" ...
driver.find_element(By.ID, "submitButton").click()
# driver should still be focused on frame "iframe1" inside frame "sixth" ...
assert self.getTextOfGreetingElement(driver) == "Success!"
# Make sure it was really frame "iframe1" inside frame "sixth" which was replaced ...
driver.switch_to.default_content()
driver.switch_to.frame(driver.find_element_by_id("sixth"))
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
assert driver.find_element(By.ID, "greeting").text == "Success!"
def testShouldBeAbleToFindElementsInIframesByXPath(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
element = driver.find_element(By.XPATH, "//*[@id = 'changeme']")
assert element is not None
@pytest.mark.xfail_phantomjs
def testGetCurrentUrlReturnsTopLevelBrowsingContextUrl(self, driver, pages):
pages.load("frameset.html")
assert "frameset.html" in driver.current_url
driver.switch_to.frame(driver.find_element_by_name("second"))
assert "frameset.html" in driver.current_url
@pytest.mark.xfail_phantomjs
def testGetCurrentUrlReturnsTopLevelBrowsingContextUrlForIframes(self, driver, pages):
pages.load("iframes.html")
assert "iframes.html" in driver.current_url
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
assert "iframes.html" in driver.current_url
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUs(self, driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
driver.switch_to.default_content()
WebDriverWait(driver, 3).until_not(
EC.presence_of_element_located((By.ID, "iframe1")))
addIFrame = driver.find_element(By.ID, "addBackFrame")
addIFrame.click()
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "iframe1")))
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUsWithFrameIndex(self, driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
iframe = 0
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
# we should be in the frame now
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
driver.switch_to.default_content()
addIFrame = driver.find_element(By.ID, "addBackFrame")
addIFrame.click()
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUsWithWebelement(self, driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
iframe = driver.find_element(By.ID, "iframe1")
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
# we should be in the frame now
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
driver.switch_to.default_content()
addIFrame = driver.find_element(By.ID, "addBackFrame")
addIFrame.click()
iframe = driver.find_element(By.ID, "iframe1")
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
@pytest.mark.xfail_chrome(raises=NoSuchElementException)
@pytest.mark.xfail_marionette(raises=WebDriverException)
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldNotBeAbleToDoAnythingTheFrameIsDeletedFromUnderUs(self, driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
with pytest.raises(NoSuchFrameException):
driver.find_element(By.ID, "killIframe").click()
def testShouldReturnWindowTitleInAFrameset(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("third"))
assert "Unique title" == driver.title
def testJavaScriptShouldExecuteInTheContextOfTheCurrentFrame(self, driver, pages):
pages.load("frameset.html")
assert driver.execute_script("return window == window.top")
driver.switch_to.frame(driver.find_element(By.NAME, "third"))
assert driver.execute_script("return window != window.top")
def testShouldNotSwitchMagicallyToTheTopWindow(self, driver, pages):
pages.load("frame_switching_tests/bug4876.html")
driver.switch_to.frame(0)
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "inputText")))
for i in range(20):
try:
input = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "inputText")))
submit = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "submitButton")))
input.clear()
import random
input.send_keys("rand%s" % int(random.random()))
submit.click()
finally:
url = driver.execute_script("return window.location.href")
# IE6 and Chrome append a "?" to the end of the URL
if url.endswith("?"):
url = url[:-1]
assert pages.url("frame_switching_tests/bug4876_iframe.html") == url
def testGetShouldSwitchToDefaultContext(self, driver, pages):
pages.load("iframes.html")
driver.find_element(By.ID, "iframe1")
driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
driver.find_element(By.ID, "cheese") # Found on formPage.html but not on iframes.html.
pages.load("iframes.html") # This must effectively switch_to.default_content(), too.
driver.find_element(By.ID, "iframe1")
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from collections import namedtuple
import mxnet as mx
import mxnet.ndarray as nd
from mxnet.base import MXNetError
from mxnet import gluon
from mxnet.gluon.data.vision import transforms
from mxnet import image
from mxnet.test_utils import *
from common import assertRaises, xfail_when_nonstandard_decimal_separator
import numpy as np
import pytest
def test_to_tensor():
# 3D Input
data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
assert_almost_equal(out_nd.asnumpy(), np.transpose(
data_in.astype(dtype=np.float32) / 255.0, (2, 0, 1)))
# 4D Input
data_in = np.random.uniform(0, 255, (5, 300, 300, 3)).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
assert_almost_equal(out_nd.asnumpy(), np.transpose(
data_in.astype(dtype=np.float32) / 255.0, (0, 3, 1, 2)))
# Invalid Input
invalid_data_in = nd.random.uniform(0, 255, (5, 5, 300, 300, 3)).astype(dtype=np.uint8)
transformer = transforms.ToTensor()
assertRaises(MXNetError, transformer, invalid_data_in)
# Bounds (0->0, 255->1)
data_in = np.zeros((10, 20, 3)).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
assert same(out_nd.asnumpy(), np.transpose(np.zeros(data_in.shape, dtype=np.float32), (2, 0, 1)))
data_in = np.full((10, 20, 3), 255).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
assert same(out_nd.asnumpy(), np.transpose(np.ones(data_in.shape, dtype=np.float32), (2, 0, 1)))
def test_normalize():
# 3D Input
data_in_3d = nd.random.uniform(0, 1, (3, 300, 300))
out_nd_3d = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))(data_in_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
assert_almost_equal(data_expected_3d, out_nd_3d.asnumpy())
# 4D Input
data_in_4d = nd.random.uniform(0, 1, (2, 3, 300, 300))
out_nd_4d = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))(data_in_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
assert_almost_equal(data_expected_4d, out_nd_4d.asnumpy())
# Invalid Input - Neither 3D or 4D input
invalid_data_in = nd.random.uniform(0, 1, (5, 5, 3, 300, 300))
normalize_transformer = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))
assertRaises(MXNetError, normalize_transformer, invalid_data_in)
# Invalid Input - Channel neither 1 or 3
invalid_data_in = nd.random.uniform(0, 1, (5, 4, 300, 300))
normalize_transformer = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))
assertRaises(MXNetError, normalize_transformer, invalid_data_in)
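# Worked example of the Normalize arithmetic checked in test_normalize above:
# with mean=(0, 1, 2) and std=(3, 2, 1), a pixel of 0.6 in every channel maps to
# ((0.6 - 0) / 3, (0.6 - 1) / 2, (0.6 - 2) / 1) = (0.2, -0.2, -1.4).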
def test_resize():
def _test_resize_with_diff_type(dtype):
# test normal case
data_in = nd.random.uniform(0, 255, (300, 200, 3)).astype(dtype)
out_nd = transforms.Resize(200)(data_in)
data_expected = mx.image.imresize(data_in, 200, 200, 1)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test 4D input
data_bath_in = nd.random.uniform(0, 255, (3, 300, 200, 3)).astype(dtype)
out_batch_nd = transforms.Resize(200)(data_bath_in)
for i in range(len(out_batch_nd)):
assert_almost_equal(mx.image.imresize(data_bath_in[i], 200, 200, 1).asnumpy(),
out_batch_nd[i].asnumpy())
# test interp = 2
out_nd = transforms.Resize(200, interpolation=2)(data_in)
data_expected = mx.image.imresize(data_in, 200, 200, 2)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test height not equal to width
out_nd = transforms.Resize((200, 100))(data_in)
data_expected = mx.image.imresize(data_in, 200, 100, 1)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test keep_ratio
out_nd = transforms.Resize(150, keep_ratio=True)(data_in)
data_expected = mx.image.imresize(data_in, 150, 225, 1)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test size below zero
invalid_transform = transforms.Resize(-150, keep_ratio=True)
assertRaises(MXNetError, invalid_transform, data_in)
# test size tuple with more than 2 values:
invalid_transform = transforms.Resize((100, 100, 100), keep_ratio=True)
assertRaises(MXNetError, invalid_transform, data_in)
for dtype in ['uint8', 'float32', 'float64']:
_test_resize_with_diff_type(dtype)
def test_crop_resize():
def _test_crop_resize_with_diff_type(dtype):
# test normal case
data_in = nd.arange(60).reshape((5, 4, 3)).astype(dtype)
out_nd = transforms.CropResize(0, 0, 3, 2)(data_in)
out_np = out_nd.asnumpy()
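# With data_in = arange(60).reshape((5, 4, 3)), CropResize(0, 0, 3, 2) keeps
# rows 0-1 and columns 0-2 of every channel, i.e. the values {0..8} and
# {12..20}, whose sum is 36 + 144 = 180 (checked below).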
assert(out_np.sum() == 180)
assert((out_np[0:2,1,1].flatten() == [4, 16]).all())
# test 4D input
data_bath_in = nd.arange(180).reshape((2, 6, 5, 3)).astype(dtype)
out_batch_nd = transforms.CropResize(1, 2, 3, 4)(data_bath_in)
out_batch_np = out_batch_nd.asnumpy()
assert(out_batch_np.sum() == 7524)
assert((out_batch_np[0:2,0:4,1,1].flatten() == [37, 52, 67, 82, 127, 142, 157, 172]).all())
# test normal case with resize
data_in = nd.random.uniform(0, 255, (300, 200, 3)).astype(dtype)
out_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 1)(data_in)
data_expected = transforms.Resize(size=25, interpolation=1)(nd.slice(data_in, (0, 0, 0), (50, 100, 3)))
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test 4D input with resize
data_bath_in = nd.random.uniform(0, 255, (3, 300, 200, 3)).astype(dtype)
out_batch_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 1)(data_bath_in)
for i in range(len(out_batch_nd)):
actual = transforms.Resize(size=25, interpolation=1)(nd.slice(data_bath_in[i], (0, 0, 0), (50, 100, 3))).asnumpy()
expected = out_batch_nd[i].asnumpy()
assert_almost_equal(expected, actual)
# test that resize height and width must be greater than 0
transformer = transforms.CropResize(0, 0, 100, 50, (-25, 25), 1)
assertRaises(MXNetError, transformer, data_in)
# test that crop height and width must be greater than 0
transformer = transforms.CropResize(0, 0, -100, -50)
assertRaises(MXNetError, transformer, data_in)
# test cropped area is bigger than input data
transformer = transforms.CropResize(150, 200, 200, 500)
assertRaises(MXNetError, transformer, data_in)
assertRaises(MXNetError, transformer, data_bath_in)
for dtype in ['uint8', 'float32', 'float64']:
_test_crop_resize_with_diff_type(dtype)
# test nd.image.crop backward
def test_crop_backward(test_nd_arr, TestCase):
a_np = test_nd_arr.asnumpy()
b_np = a_np[(slice(TestCase.y, TestCase.y + TestCase.height), slice(TestCase.x, TestCase.x + TestCase.width), slice(0, 3))]
data = mx.sym.Variable('data')
crop_sym = mx.sym.image.crop(data, TestCase.x, TestCase.y, TestCase.width, TestCase.height)
expected_in_grad = np.zeros_like(a_np)
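# The gradient of a crop scatters the upstream gradient back into the cropped
# window and leaves every other input position at zero, which is what the
# expected_in_grad array below encodes.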
expected_in_grad[(slice(TestCase.y, TestCase.y + TestCase.height), slice(TestCase.x, TestCase.x + TestCase.width), slice(0, 3))] = b_np
check_symbolic_backward(crop_sym, [a_np], [b_np], [expected_in_grad])
TestCase = namedtuple('TestCase', ['x', 'y', 'width', 'height'])
test_list = [TestCase(0, 0, 3, 3), TestCase(2, 1, 1, 2), TestCase(0, 1, 3, 2)]
for dtype in ['uint8', 'float32', 'float64']:
data_in = nd.arange(60).reshape((5, 4, 3)).astype(dtype)
for test_case in test_list:
test_crop_backward(data_in, test_case)
def test_flip_left_right():
for width in range(3, 301, 7):
data_in = np.random.uniform(0, 255, (300, width, 3)).astype(dtype=np.uint8)
flip_in = data_in[:, ::-1, :]
data_trans = nd.image.flip_left_right(nd.array(data_in, dtype='uint8'))
assert_almost_equal(flip_in, data_trans.asnumpy())
def test_flip_top_bottom():
for height in range(3, 301, 7):
data_in = np.random.uniform(0, 255, (height, 300, 3)).astype(dtype=np.uint8)
flip_in = data_in[::-1, :, :]
data_trans = nd.image.flip_top_bottom(nd.array(data_in, dtype='uint8'))
assert_almost_equal(flip_in, data_trans.asnumpy())
def test_transformer():
from mxnet.gluon.data.vision import transforms
transform = transforms.Compose([
transforms.Resize(300),
transforms.Resize(300, keep_ratio=True),
transforms.CenterCrop(256),
transforms.RandomCrop(256, pad=16),
transforms.RandomResizedCrop(224),
transforms.RandomFlipLeftRight(),
transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
transforms.RandomBrightness(0.1),
transforms.RandomContrast(0.1),
transforms.RandomSaturation(0.1),
transforms.RandomHue(0.1),
transforms.RandomLighting(0.1),
transforms.ToTensor(),
transforms.RandomRotation([-10., 10.]),
transforms.Normalize([0, 0, 0], [1, 1, 1])])
transform(mx.nd.ones((245, 480, 3), dtype='uint8')).wait_to_read()
def test_random_crop():
x = mx.nd.ones((245, 480, 3), dtype='uint8')
y = mx.nd.image.random_crop(x, width=100, height=100)
assert y.shape == (100, 100, 3)
def test_random_resize_crop():
x = mx.nd.ones((245, 480, 3), dtype='uint8')
y = mx.nd.image.random_resized_crop(x, width=100, height=100)
assert y.shape == (100, 100, 3)
def test_hybrid_transformer():
from mxnet.gluon.data.vision import transforms
transform = transforms.HybridCompose([
transforms.Resize(300),
transforms.Resize(300, keep_ratio=True),
transforms.CenterCrop(256),
transforms.RandomCrop(256, pad=16),
transforms.RandomFlipLeftRight(),
transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
transforms.RandomBrightness(0.1),
transforms.RandomContrast(0.1),
transforms.RandomSaturation(0.1),
transforms.RandomHue(0.1),
transforms.RandomLighting(0.1),
transforms.ToTensor(),
transforms.Normalize([0, 0, 0], [1, 1, 1])])
transform(mx.nd.ones((245, 480, 3), dtype='uint8')).wait_to_read()
@xfail_when_nonstandard_decimal_separator
def test_rotate():
transformer = transforms.Rotate(10.)
assertRaises(TypeError, transformer, mx.nd.ones((3, 30, 60), dtype='uint8'))
single_image = mx.nd.ones((3, 30, 60), dtype='float32')
single_output = transformer(single_image)
assert same(single_output.shape, (3, 30, 60))
batch_image = mx.nd.ones((3, 3, 30, 60), dtype='float32')
batch_output = transformer(batch_image)
assert same(batch_output.shape, (3, 3, 30, 60))
input_image = nd.array([[[0., 0., 0.],
[0., 0., 1.],
[0., 0., 0.]]])
rotation_angles_expected_outs = [
(90., nd.array([[[0., 1., 0.],
[0., 0., 0.],
[0., 0., 0.]]])),
(180., nd.array([[[0., 0., 0.],
[1., 0., 0.],
[0., 0., 0.]]])),
(270., nd.array([[[0., 0., 0.],
[0., 0., 0.],
[0., 1., 0.]]])),
(360., nd.array([[[0., 0., 0.],
[0., 0., 1.],
[0., 0., 0.]]])),
]
for rot_angle, expected_result in rotation_angles_expected_outs:
transformer = transforms.Rotate(rot_angle)
ans = transformer(input_image)
print(ans, expected_result)
assert_almost_equal(ans, expected_result, atol=1e-6)
def test_random_rotation():
# test exceptions for probability input outside of [0,1]
assertRaises(ValueError, transforms.RandomRotation, [-10, 10.], rotate_with_proba=1.1)
assertRaises(ValueError, transforms.RandomRotation, [-10, 10.], rotate_with_proba=-0.3)
# test `forward`
transformer = transforms.RandomRotation([-10, 10.])
assertRaises(TypeError, transformer, mx.nd.ones((3, 30, 60), dtype='uint8'))
single_image = mx.nd.ones((3, 30, 60), dtype='float32')
single_output = transformer(single_image)
assert same(single_output.shape, (3, 30, 60))
batch_image = mx.nd.ones((3, 3, 30, 60), dtype='float32')
batch_output = transformer(batch_image)
assert same(batch_output.shape, (3, 3, 30, 60))
# test identity (rotate_with_proba = 0)
transformer = transforms.RandomRotation([-100., 100.], rotate_with_proba=0.0)
data = mx.nd.random_normal(shape=(3, 30, 60))
assert_almost_equal(data, transformer(data))
def test_random_transforms():
from mxnet.gluon.data.vision import transforms
counter = 0
def transform_fn(x):
nonlocal counter
counter += 1
return x
transform = transforms.Compose([transforms.RandomApply(transform_fn, 0.5)])
img = mx.np.ones((10, 10, 3), dtype='uint8')
iteration = 10000
num_apply = 0
for _ in range(iteration):
out = transform(img)
assert counter == pytest.approx(5000, 1e-1)
@xfail_when_nonstandard_decimal_separator
def test_random_gray():
from mxnet.gluon.data.vision import transforms
transform = transforms.RandomGray(0.5)
img = mx.nd.ones((4, 4, 3), dtype='uint8')
pixel = img[0, 0, 0].asnumpy()
iteration = 1000
num_apply = 0
for _ in range(iteration):
out = transform(img)
if out[0][0][0].asnumpy() != pixel:
num_apply += 1
assert_almost_equal(num_apply/float(iteration), 0.5, 0.1)
transform = transforms.RandomGray(0.5)
transform.hybridize()
img = mx.nd.ones((4, 4, 3), dtype='uint8')
pixel = img[0, 0, 0].asnumpy()
iteration = 1000
num_apply = 0
for _ in range(iteration):
out = transform(img)
if out[0][0][0].asnumpy() != pixel:
num_apply += 1
assert_almost_equal(num_apply/float(iteration), 0.5, 0.1)
def test_bbox_random_flip():
from mxnet.gluon.contrib.data.vision.transforms.bbox import ImageBboxRandomFlipLeftRight
transform = ImageBboxRandomFlipLeftRight(0.5)
iteration = 200
num_apply = 0
for _ in range(iteration):
img = mx.nd.ones((10, 10, 3), dtype='uint8')
img[0, 0, 0] = 10
bbox = mx.nd.array([[1, 2, 3, 4, 0]])
im_out, im_bbox = transform(img, bbox)
if im_bbox[0][0].asnumpy() != 1 and im_out[0, 0, 0].asnumpy() != 10:
num_apply += 1
assert_almost_equal(np.array([num_apply])/float(iteration), 0.5, 0.5)
def test_bbox_crop():
from mxnet.gluon.contrib.data.vision.transforms.bbox import ImageBboxCrop
transform = ImageBboxCrop((0, 0, 3, 3))
img = mx.nd.ones((10, 10, 3), dtype='uint8')
bbox = mx.nd.array([[0, 1, 3, 4, 0]])
im_out, im_bbox = transform(img, bbox)
assert im_out.shape == (3, 3, 3)
assert im_bbox[0][2] == 3
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkTapsOperations(object):
"""VirtualNetworkTapsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
tap_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
tap_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified virtual network tap.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the virtual network tap.
:type tap_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
tap_name=tap_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
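# Example (hedged sketch): begin_delete returns an LROPoller, so a caller
# typically blocks on the result, e.g.
#   poller = client.virtual_network_taps.begin_delete('my-rg', 'my-tap')
#   poller.result()  # waits for the long-running delete to finish
# `client` is assumed to be an authenticated management client; the resource
# group and tap names are illustrative only.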
def get(
self,
resource_group_name, # type: str
tap_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkTap"
"""Gets information about the specified virtual network tap.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the virtual network tap.
:type tap_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkTap, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.VirtualNetworkTap
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
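# Example (hedged sketch): get performs a plain GET and deserializes the body
# into a VirtualNetworkTap model, e.g.
#   tap = client.virtual_network_taps.get('my-rg', 'my-tap')
#   print(tap.id)
# `client` and both names are illustrative assumptions; `id` is a standard
# Resource field on the returned model.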
def _create_or_update_initial(
self,
resource_group_name, # type: str
tap_name, # type: str
parameters, # type: "_models.VirtualNetworkTap"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkTap"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkTap')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
tap_name, # type: str
parameters, # type: "_models.VirtualNetworkTap"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkTap"]
"""Creates or updates a Virtual Network Tap.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the virtual network tap.
:type tap_name: str
:param parameters: Parameters supplied to the create or update virtual network tap operation.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.VirtualNetworkTap
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkTap or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkTap]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
tap_name=tap_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
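# Example (hedged sketch): create/update is also a long-running operation.
# `tap_model` below stands for a fully populated models.VirtualNetworkTap built
# by the caller (its required fields are not shown here):
#   poller = client.virtual_network_taps.begin_create_or_update(
#       'my-rg', 'my-tap', parameters=tap_model)
#   tap = poller.result()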
def update_tags(
self,
resource_group_name, # type: str
tap_name, # type: str
tap_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkTap"
"""Updates an VirtualNetworkTap tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the tap.
:type tap_name: str
:param tap_parameters: Parameters supplied to update VirtualNetworkTap tags.
:type tap_parameters: ~azure.mgmt.network.v2020_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkTap, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.VirtualNetworkTap
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(tap_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
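# Example (hedged sketch): update_tags only patches the resource tags, e.g.
#   tap = client.virtual_network_taps.update_tags(
#       'my-rg', 'my-tap', _models.TagsObject(tags={'env': 'dev'}))
# The TagsObject usage mirrors the docstring above; names and values are
# illustrative only.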
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkTapListResult"]
"""Gets all the VirtualNetworkTaps in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkTapListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkTapListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTapListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkTapListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworkTaps'} # type: ignore
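# Example (hedged sketch): list_all returns an ItemPaged iterator that follows
# next_link transparently, so callers simply iterate:
#   for tap in client.virtual_network_taps.list_all():
#       print(tap.name)
# `client` is an assumed, already-configured management client.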
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkTapListResult"]
"""Gets all the VirtualNetworkTaps in a subscription.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkTapListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkTapListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTapListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkTapListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps'} # type: ignore
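# Illustrative usage sketch (not part of the generated SDK): how a caller
# might consume the pager returned by list_by_resource_group. `client` is
# assumed to be an already-constructed NetworkManagementClient; ItemPaged
# invokes get_next/extract_data above lazily, so additional pages are only
# fetched as iteration advances.
def _example_list_taps_by_resource_group(client, resource_group_name):
    # Hypothetical helper, shown only to illustrate the paging protocol.
    return [tap.name for tap in
            client.virtual_network_taps.list_by_resource_group(resource_group_name)]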
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mxconsole.protobuf import tensor_shape_pb2
from mxconsole.util import compat
class Dimension(object):
"""Represents the value of one dimension in a TensorShape."""
def __init__(self, value):
"""Creates a new Dimension with the given value."""
if value is None:
self._value = None
else:
self._value = int(value)
if (not isinstance(value, compat.bytes_or_text_types)
and self._value != value):
raise ValueError("Ambiguous dimension: %s" % value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __str__(self):
value = self._value
return "?" if value is None else str(value)
def __eq__(self, other):
"""Returns true if `other` has the same known value as this Dimension."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
"""Returns true if `other` has a different known value from `self`."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value != other.value
def __int__(self):
return self._value
def __index__(self):
# Allow use in Python 3 range
return self._value
@property
def value(self):
"""The value of this dimension, or None if it is unknown."""
return self._value
def is_compatible_with(self, other):
"""Returns true if `other` is compatible with this Dimension.
Two known Dimensions are compatible if they have the same value.
An unknown Dimension is compatible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are compatible.
"""
other = as_dimension(other)
return (self._value is None
or other.value is None
or self._value == other.value)
def assert_is_compatible_with(self, other):
"""Raises an exception if `other` is not compatible with this Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
if not self.is_compatible_with(other):
raise ValueError("Dimensions %s and %s are not compatible"
% (self, other))
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and `other`.
Dimensions are combined as follows:
```python
Dimension(n) .merge_with(Dimension(n)) == Dimension(n)
Dimension(n) .merge_with(Dimension(None)) == Dimension(n)
Dimension(None).merge_with(Dimension(n)) == Dimension(n)
Dimension(None).merge_with(Dimension(None)) == Dimension(None)
Dimension(n) .merge_with(Dimension(m)) raises ValueError for n != m
```
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
other = as_dimension(other)
self.assert_is_compatible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
def __add__(self, other):
"""Returns the sum of `self` and `other`.
Dimensions are summed as follows:
Dimension(m) + Dimension(n) == Dimension(m + n)
Dimension(m) + Dimension(None) == Dimension(None)
Dimension(None) + Dimension(n) == Dimension(None)
Dimension(None) + Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value + other.value)
def __sub__(self, other):
"""Returns the subtraction of `other` from `self`.
Dimensions are subtracted as follows:
Dimension(m) - Dimension(n) == Dimension(m - n)
Dimension(m) - Dimension(None) == Dimension(None)
Dimension(None) - Dimension(n) == Dimension(None)
Dimension(None) - Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is the subtraction of `other` from `self`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value - other.value)
def __mul__(self, other):
"""Returns the product of `self` and `other`.
Dimensions are multiplied as follows:
```
Dimension(m) * Dimension(n) == Dimension(m * n)
Dimension(m) * Dimension(None) == Dimension(None)
Dimension(None) * Dimension(n) == Dimension(None)
Dimension(None) * Dimension(None) == Dimension(None)
```
Args:
other: Another Dimension.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value * other.value)
def __floordiv__(self, other):
"""Returns the quotient of `self` and `other` rounded down.
Dimensions are divided as follows:
Dimension(m) // Dimension(n) == Dimension(m // n)
Dimension(m) // Dimension(None) == Dimension(None)
Dimension(None) // Dimension(n) == Dimension(None)
Dimension(None) // Dimension(None) == Dimension(None)
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value // other.value)
def __div__(self, other):
"""DEPRECATED: Use `__floordiv__` via `x // y` instead.
This function exists only for backwards compatibility purposes; new code
should use `__floordiv__` via the syntax `x // y`. Using `x // y`
communicates clearly that the result rounds down, and is forward compatible
to Python 3.
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
return self // other
def __mod__(self, other):
"""Returns `self` modulo `other.
Dimension moduli are computed as follows:
Dimension(m) % Dimension(n) == Dimension(m % n)
Dimension(m) % Dimension(None) == Dimension(None)
Dimension(None) % Dimension(n) == Dimension(None)
Dimension(None) % Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is `self` modulo `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value % other.value)
def __lt__(self, other):
"""Returns True if `self` is known to be less than `other`.
Dimensions are compared as follows:
Dimension(m) < Dimension(n) == m < n
Dimension(m) < Dimension(None) == None
Dimension(None) < Dimension(n) == None
Dimension(None) < Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value < other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value < other.value
def __le__(self, other):
"""Returns True if `self` is known to be less than or equal to `other`.
Dimensions are compared as follows:
Dimension(m) <= Dimension(n) == m <= n
Dimension(m) <= Dimension(None) == None
Dimension(None) <= Dimension(n) == None
Dimension(None) <= Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value <= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value <= other.value
def __gt__(self, other):
"""Returns True if `self` is known to be greater than `other`.
Dimensions are compared as follows:
Dimension(m) > Dimension(n) == m > n
Dimension(m) > Dimension(None) == None
Dimension(None) > Dimension(n) == None
Dimension(None) > Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value > other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value > other.value
def __ge__(self, other):
"""Returns True if `self` is known to be greater than or equal to `other`.
Dimensions are compared as follows:
Dimension(m) >= Dimension(n) == m >= n
Dimension(m) >= Dimension(None) == None
Dimension(None) >= Dimension(n) == None
Dimension(None) >= Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value >= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value >= other.value
def as_dimension(value):
"""Converts the given value to a Dimension.
A Dimension input will be returned unmodified.
An input of `None` will be converted to an unknown Dimension.
An integer input will be converted to a Dimension with that value.
Args:
value: The value to be converted.
Returns:
A Dimension corresponding to the given value.
"""
if isinstance(value, Dimension):
return value
else:
return Dimension(value)
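# Illustrative sketch (not part of the original module): Dimension arithmetic
# propagates "unknown" (None) values, while merge_with recovers the known
# value, as documented in the methods above.
def _example_dimension_arithmetic():
    # Hypothetical helper demonstrating the documented semantics.
    known = Dimension(8)
    unknown = Dimension(None)
    assert (known + known).value == 16
    assert (known + unknown).value is None          # unknown propagates
    assert known.merge_with(unknown).value == 8     # merge recovers the known value
    assert known.is_compatible_with(unknown)        # unknown is compatible with anything
    return known, unknown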
class TensorShape(object):
"""Represents the shape of a `Tensor`.
A `TensorShape` represents a possibly-partial shape specification for a
`Tensor`. It may be one of the following:
* *Fully-known shape:* has a known number of dimensions and a known size
for each dimension.
* *Partially-known shape:* has a known number of dimensions, and an unknown
size for one or more dimensions.
* *Unknown shape:* has an unknown number of dimensions, and an unknown
size in all dimensions.
If a tensor is produced by an operation of type `"Foo"`, its shape
may be inferred if there is a registered shape function for
`"Foo"`. See @{$adding_an_op#shape-functions-in-c$`Shape functions in C++`} for
details of shape functions and how to register them. Alternatively,
the shape may be set explicitly using
@{tf.Tensor.set_shape}.
"""
def __init__(self, dims):
"""Creates a new TensorShape with the given dimensions.
Args:
dims: A list of Dimensions, or None if the shape is unspecified.
DEPRECATED: A single integer is treated as a singleton list.
Raises:
TypeError: If dims cannot be converted to a list of dimensions.
"""
# TODO(irving): Eliminate the single integer special case.
if dims is None:
self._dims = None
elif isinstance(dims, compat.bytes_or_text_types):
raise TypeError("A string has ambiguous TensorShape, please wrap in a "
"list or convert to an int: %s" % dims)
elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
if dims.unknown_rank:
self._dims = None
else:
self._dims = [
# Protos store variable-size dimensions as -1
as_dimension(dim.size if dim.size != -1 else None)
for dim in dims.dim]
elif isinstance(dims, TensorShape):
self._dims = dims.dims
else:
try:
dims_iter = iter(dims)
except TypeError:
# Treat as a singleton dimension
self._dims = [as_dimension(dims)]
else:
# Got a list of dimensions
self._dims = [as_dimension(d) for d in dims_iter]
def __repr__(self):
return "TensorShape(%r)" % self._dims
def __str__(self):
if self.ndims is None:
return "<unknown>"
elif self.ndims == 1:
return "(%s,)" % self._dims[0]
else:
return "(%s)" % ", ".join(str(d) for d in self._dims)
@property
def dims(self):
"""Returns a list of Dimensions, or None if the shape is unspecified."""
return self._dims
@property
def ndims(self):
"""Returns the rank of this shape, or None if it is unspecified."""
if self._dims is None:
return None
else:
return len(self._dims)
def __len__(self):
"""Returns the rank of this shape, or raises ValueError if unspecified."""
if self._dims is None:
raise ValueError("Cannot take the length of Shape with unknown rank.")
return len(self._dims)
def __bool__(self):
"""Returns True if this shape contains non-zero information."""
return self._dims is not None
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __iter__(self):
"""Returns `self.dims` if the rank is known, otherwise raises ValueError."""
if self._dims is None:
raise ValueError("Cannot iterate over a shape with unknown rank.")
else:
return iter(self._dims)
def __getitem__(self, key):
"""Returns the value of a dimension or a shape, depending on the key.
Args:
key: If `key` is an integer, returns the dimension at that index;
otherwise if `key` is a slice, returns a TensorShape whose
dimensions are those selected by the slice from `self`.
Returns:
A dimension if `key` is an integer, or a `TensorShape` if `key` is a
slice.
Raises:
ValueError: If `key` is a slice, and any of its elements are negative, or
if `self` is completely unknown and the step is set.
"""
if self._dims is not None:
if isinstance(key, slice):
return TensorShape(self._dims[key])
else:
return self._dims[key]
else:
if isinstance(key, slice):
start = key.start if key.start is not None else 0
stop = key.stop
if key.step is not None:
# TODO(mrry): Handle these maybe.
raise ValueError("Steps are not yet handled")
if stop is None:
# NOTE(mrry): This implies that TensorShape(None) is compatible with
# TensorShape(None)[1:], which is obviously not true. It would be
# possible to track the number of dimensions symbolically,
# and perhaps we should do that.
return unknown_shape()
elif start < 0 or stop < 0:
# TODO(mrry): Handle this better, as it will be useful for handling
# suffixes of otherwise unknown shapes.
return unknown_shape()
else:
return unknown_shape(ndims=stop-start)
else:
return Dimension(None)
def num_elements(self):
"""Returns the total number of elements, or none for incomplete shapes."""
if self.is_fully_defined():
size = 1
for dim in self._dims:
size *= dim.value
return size
else:
return None
def merge_with(self, other):
"""Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible.
"""
other = as_shape(other)
if self._dims is None:
return other
else:
try:
self.assert_same_rank(other)
new_dims = []
for i, dim in enumerate(self._dims):
new_dims.append(dim.merge_with(other[i]))
return TensorShape(new_dims)
except ValueError:
raise ValueError("Shapes %s and %s are not compatible" %
(self, other))
def concatenate(self, other):
"""Returns the concatenation of the dimension in `self` and `other`.
*N.B.* If either `self` or `other` is completely unknown,
concatenation will discard information about the other shape. In
future, we might support concatenation that preserves this
information for use with slicing.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` whose dimensions are the concatenation of the
dimensions in `self` and `other`.
"""
# TODO(mrry): Handle the case where we concatenate a known shape with a
# completely unknown shape, so that we can use the partial information.
other = as_shape(other)
if self._dims is None or other.dims is None:
return unknown_shape()
else:
return TensorShape(self._dims + other.dims)
def assert_same_rank(self, other):
"""Raises an exception if `self` and `other` do not have compatible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
"""
other = as_shape(other)
if self.ndims is not None and other.ndims is not None:
if self.ndims != other.ndims:
raise ValueError(
"Shapes %s and %s must have the same rank" % (self, other))
def assert_has_rank(self, rank):
"""Raises an exception if `self` is not compatible with the given `rank`.
Args:
rank: An integer.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
if self.ndims not in (None, rank):
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank(self, rank):
"""Returns a shape based on `self` with the given rank.
This method promotes a completely unknown shape to one with a
known rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with the given rank.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
try:
return self.merge_with(unknown_shape(ndims=rank))
except ValueError:
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank_at_least(self, rank):
"""Returns a shape based on `self` with at least the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at least the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at least the given
`rank`.
"""
if self.ndims is not None and self.ndims < rank:
raise ValueError("Shape %s must have rank at least %d" % (self, rank))
else:
return self
def with_rank_at_most(self, rank):
"""Returns a shape based on `self` with at most the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at most the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at most the given
`rank`.
"""
if self.ndims is not None and self.ndims > rank:
raise ValueError("Shape %s must have rank at most %d" % (self, rank))
else:
return self
def is_compatible_with(self, other):
"""Returns True iff `self` is compatible with `other`.
Two possibly-partially-defined shapes are compatible if there
exists a fully-defined shape that both shapes can represent. Thus,
compatibility allows the shape inference code to reason about
partially-defined shapes. For example:
* TensorShape(None) is compatible with all shapes.
* TensorShape([None, None]) is compatible with all two-dimensional
shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
not compatible with, for example, TensorShape([None]) or
TensorShape([None, None, None]).
* TensorShape([32, None]) is compatible with all two-dimensional shapes
with size 32 in the 0th dimension, and also TensorShape([None, None])
and TensorShape(None). It is not compatible with, for example,
TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
* TensorShape([32, 784]) is compatible with itself, and also
TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
None]) and TensorShape(None). It is not compatible with, for example,
TensorShape([32, 1, 784]) or TensorShape([None]).
The compatibility relation is reflexive and symmetric, but not
transitive. For example, TensorShape([32, 784]) is compatible with
TensorShape(None), and TensorShape(None) is compatible with
TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with
TensorShape([4, 4]).
Args:
other: Another TensorShape.
Returns:
True iff `self` is compatible with `other`.
"""
other = as_shape(other)
if self._dims is not None and other.dims is not None:
if self.ndims != other.ndims:
return False
for x_dim, y_dim in zip(self._dims, other.dims):
if not x_dim.is_compatible_with(y_dim):
return False
return True
def assert_is_compatible_with(self, other):
"""Raises exception if `self` and `other` do not represent the same shape.
This method can be used to assert that there exists a shape that both
`self` and `other` represent.
Args:
other: Another TensorShape.
Raises:
ValueError: If `self` and `other` do not represent the same shape.
"""
if not self.is_compatible_with(other):
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
def is_fully_defined(self):
"""Returns True iff `self` is fully defined in every dimension."""
return (self._dims is not None
and all(dim.value is not None for dim in self._dims))
def assert_is_fully_defined(self):
"""Raises an exception if `self` is not fully defined in every dimension.
Raises:
ValueError: If `self` does not have a known value for every dimension.
"""
if not self.is_fully_defined():
raise ValueError("Shape %s is not fully defined" % self)
def as_list(self):
"""Returns a list of integers or `None` for each dimension.
Returns:
A list of integers or `None` for each dimension.
Raises:
ValueError: If `self` is an unknown shape with an unknown rank.
"""
if self._dims is None:
raise ValueError("as_list() is not defined on an unknown TensorShape.")
return [dim.value for dim in self._dims]
def as_proto(self):
"""Returns this shape as a `TensorShapeProto`."""
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=-1 if d.value is None else d.value)
for d in self._dims])
def __eq__(self, other):
"""Returns True if `self` is equivalent to `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
return self._dims == other.dims
def __ne__(self, other):
"""Returns True if `self` is known to be different from `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
if self.ndims is None or other.ndims is None:
raise ValueError("The inequality of unknown TensorShapes is undefined.")
if self.ndims != other.ndims:
return True
return self._dims != other.dims
def as_shape(shape):
"""Converts the given object to a TensorShape."""
if isinstance(shape, TensorShape):
return shape
else:
return TensorShape(shape)
def unknown_shape(ndims=None):
"""Returns an unknown TensorShape, optionally with a known rank.
Args:
ndims: (Optional) If specified, the number of dimensions in the shape.
Returns:
An unknown TensorShape.
"""
if ndims is None:
return TensorShape(None)
else:
return TensorShape([Dimension(None)] * ndims)
def scalar():
"""Returns a shape representing a scalar."""
return TensorShape([])
def vector(length):
"""Returns a shape representing a vector.
Args:
length: The length of the vector, which may be None if unknown.
Returns:
A TensorShape representing a vector of the given length.
"""
return TensorShape([length])
def matrix(rows, cols):
"""Returns a shape representing a matrix.
Args:
rows: The number of rows in the matrix, which may be None if unknown.
cols: The number of columns in the matrix, which may be None if unknown.
Returns:
A TensorShape representing a matrix of the given size.
"""
return TensorShape([rows, cols])
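# Illustrative sketch (not part of the original module): shape inference
# typically starts from a partially known shape and merges in information as
# it becomes available, as described in merge_with and is_compatible_with.
def _example_shape_merge():
    # Hypothetical helper showing merge/compatibility semantics of TensorShape.
    batch_unknown = TensorShape([None, 784])   # partially known: (?, 784)
    fully_known = TensorShape([32, 784])
    merged = batch_unknown.merge_with(fully_known)
    assert merged.as_list() == [32, 784]
    assert batch_unknown.is_compatible_with(fully_known)
    assert not fully_known.is_compatible_with(TensorShape([64, 784]))
    assert unknown_shape().ndims is None       # completely unknown shape
    return merged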
|
|
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import log as logging
from nova.pci import pci_device
from nova.pci import pci_stats
LOG = logging.getLogger(__name__)
class PciDevTracker(object):
"""Manage pci devices in a compute node.
This class fetches pci passthrough information from the hypervisor
and tracks the usage of these devices.
It's called by the compute node resource tracker to allocate and free
devices to/from instances, and to periodically update the available pci
passthrough device information from the hypervisor. The device
information is written to the DB whenever it changes.
"""
def __init__(self, node_id=None):
"""Create a pci device tracker.
If a node_id is passed in, the tracker fetches pci device information
from the database; otherwise it creates an empty device list and the
resource tracker will set the node_id later.
"""
super(PciDevTracker, self).__init__()
self.stale = {}
self.node_id = node_id
self.stats = pci_stats.PciDeviceStats()
if node_id:
self.pci_devs = list(
objects.PciDeviceList.get_by_compute_node(context, node_id))
else:
self.pci_devs = []
self._initial_instance_usage()
def _initial_instance_usage(self):
self.allocations = collections.defaultdict(list)
self.claims = collections.defaultdict(list)
for dev in self.pci_devs:
uuid = dev['instance_uuid']
if dev['status'] == 'claimed':
self.claims[uuid].append(dev)
elif dev['status'] == 'allocated':
self.allocations[uuid].append(dev)
elif dev['status'] == 'available':
self.stats.add_device(dev)
@property
def all_devs(self):
return self.pci_devs
def save(self, context):
for dev in self.pci_devs:
if dev.obj_what_changed():
dev.save(context)
self.pci_devs = [dev for dev in self.pci_devs
if dev['status'] != 'deleted']
@property
def pci_stats(self):
return self.stats
def set_hvdevs(self, devices):
"""Sync the pci device tracker with hypervisor information.
To support pci device hot plug, we sync with the hypervisor
periodically, fetching all device information from the hypervisor,
updating the tracker and syncing the DB information.
Devices should not be hot-plugged when assigned to a guest,
but possibly the hypervisor has no such guarantee. The best
we can do is to give a warning if a device is changed
or removed while assigned.
"""
exist_addrs = set([dev['address'] for dev in self.pci_devs])
new_addrs = set([dev['address'] for dev in devices])
for existed in self.pci_devs:
if existed['address'] in exist_addrs - new_addrs:
try:
pci_device.remove(existed)
except exception.PciDeviceInvalidStatus as e:
LOG.warn(_("Trying to remove device with %(status)s "
"ownership %(instance_uuid)s because of "
"%(pci_exception)s"), {'status': existed.status,
'instance_uuid': existed.instance_uuid,
'pci_exception': e.format_message()})
# Note(yjiang5): remove the device by force so that
# db entry is cleaned in next sync.
existed.status = 'removed'
else:
# Note(yjiang5): no need to update stats if an assigned
# device is hot removed.
self.stats.remove_device(existed)
else:
new_value = next((dev for dev in devices if
dev['address'] == existed['address']))
new_value['compute_node_id'] = self.node_id
if existed['status'] in ('claimed', 'allocated'):
# Pci properties may change while assigned because of
# hotplug or config changes. Although normally this should
# not happen.
# As the devices have been assigned to an instance, we defer
# the change till the instance is destroyed. We will
# not sync the new properties with database before that.
# TODO(yjiang5): Not sure if this is a right policy, but
# at least it avoids some confusion and, if needed,
# we can add more action like killing the instance
# by force in future.
self.stale[new_value['address']] = new_value
else:
pci_device.update_device(existed, new_value)
for dev in [dev for dev in devices if
dev['address'] in new_addrs - exist_addrs]:
dev['compute_node_id'] = self.node_id
dev_obj = objects.PciDevice.create(dev)
self.pci_devs.append(dev_obj)
self.stats.add_device(dev_obj)
def _claim_instance(self, context, instance, prefix=''):
pci_requests = objects.InstancePCIRequests.get_by_instance(
context, instance)
if not pci_requests.requests:
return None
devs = self.stats.consume_requests(pci_requests.requests)
if not devs:
raise exception.PciDeviceRequestFailed(pci_requests)
for dev in devs:
pci_device.claim(dev, instance)
return devs
def _allocate_instance(self, instance, devs):
for dev in devs:
pci_device.allocate(dev, instance)
def _free_device(self, dev, instance=None):
pci_device.free(dev, instance)
stale = self.stale.pop(dev['address'], None)
if stale:
pci_device.update_device(dev, stale)
self.stats.add_device(dev)
def _free_instance(self, instance):
# Note(yjiang5): When an instance is resized, the devices in the
# destination node are claimed to the instance in prep_resize stage.
# However, the instance contains only allocated devices
# information, not the claimed one. So we can't use
# instance['pci_devices'] to check the devices to be freed.
for dev in self.pci_devs:
if (dev['status'] in ('claimed', 'allocated') and
dev['instance_uuid'] == instance['uuid']):
self._free_device(dev)
def update_pci_for_instance(self, context, instance):
"""Update instance's pci usage information.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
"""
uuid = instance['uuid']
vm_state = instance['vm_state']
task_state = instance['task_state']
if vm_state == vm_states.DELETED:
if self.allocations.pop(uuid, None):
self._free_instance(instance)
elif self.claims.pop(uuid, None):
self._free_instance(instance)
elif task_state == task_states.RESIZE_MIGRATED:
devs = self.allocations.pop(uuid, None)
if devs:
self._free_instance(instance)
elif task_state == task_states.RESIZE_FINISH:
devs = self.claims.pop(uuid, None)
if devs:
self._allocate_instance(instance, devs)
self.allocations[uuid] = devs
elif (uuid not in self.allocations and
uuid not in self.claims):
devs = self._claim_instance(context, instance)
if devs:
self._allocate_instance(instance, devs)
self.allocations[uuid] = devs
def update_pci_for_migration(self, context, instance, sign=1):
"""Update instance's pci usage information when it is migrated.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock.
:param sign: claim devices for instance when sign is 1, remove
the claims when sign is -1
"""
uuid = instance['uuid']
if sign == 1 and uuid not in self.claims:
devs = self._claim_instance(context, instance, 'new_')
if devs:
self.claims[uuid] = devs
if sign == -1 and uuid in self.claims:
self._free_instance(instance)
def clean_usage(self, instances, migrations, orphans):
"""Remove all usages for instances not passed in the parameter.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
"""
existed = [inst['uuid'] for inst in instances]
existed += [mig['instance_uuid'] for mig in migrations]
existed += [inst['uuid'] for inst in orphans]
for uuid in self.claims.keys():
if uuid not in existed:
devs = self.claims.pop(uuid, [])
for dev in devs:
self._free_device(dev)
for uuid in self.allocations.keys():
if uuid not in existed:
devs = self.allocations.pop(uuid, [])
for dev in devs:
self._free_device(dev)
def set_compute_node_id(self, node_id):
"""Set the compute node id that this object is tracking for.
In the current resource tracker implementation, the
compute_node entry is created in the last step of
update_available_resources, so we have to lazily set the
compute_node_id at that time.
"""
if self.node_id and self.node_id != node_id:
raise exception.PciTrackerInvalidNodeId(node_id=self.node_id,
new_node_id=node_id)
self.node_id = node_id
for dev in self.pci_devs:
dev.compute_node_id = node_id
def get_instance_pci_devs(inst, request_id=None):
"""Get the devices allocated to one or all requests for an instance.
- For generic PCI request, the request id is None.
- For sr-iov networking, the request id is a valid uuid
- There are a couple of cases where all the PCI devices allocated to an
instance need to be returned. Refer to libvirt driver that handles
soft_reboot and hard_boot of 'xen' instances.
"""
if isinstance(inst, objects.Instance):
pci_devices = inst.pci_devices
else:
ctxt = context.get_admin_context()
pci_devices = objects.PciDeviceList.get_by_instance_uuid(
ctxt, inst['uuid'])
return [device for device in pci_devices if
device.request_id == request_id or request_id == 'all']
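# Illustrative sketch (not part of the original module): the compute resource
# tracker drives PciDevTracker roughly as below. The `hypervisor_pci_devices`
# payload and `instance` dict are hypothetical; in a real deployment they come
# from the hypervisor driver and the compute manager respectively, and a
# working database connection is required.
def _example_tracker_flow(context, instance, hypervisor_pci_devices):
    tracker = PciDevTracker(node_id=1)
    # sync the tracker with what the hypervisor currently reports
    tracker.set_hvdevs(hypervisor_pci_devices)
    # claim/allocate devices for the instance according to its PCI requests
    tracker.update_pci_for_instance(context, instance)
    # persist any status changes (claimed/allocated/removed) to the DB
    tracker.save(context)
    return tracker.pci_stats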
|
|
import os
from subprocess import check_output, check_call
from fabric.api import env, task, cd, settings, put
from fabric.contrib.files import exists
from .. import util, system
from .. contextmanagers import project # , log_call, virtualenv
__all__ = [
'create', 'install', 'remove', 'remove_templates',
'git_push', 'get_git_remotes', 'create_user', 'pip'
]
@task
def remove():
"""
Blow away the current project
"""
if exists(env.virtualenv_path):
system.sudo("rm -rf %s" % env.virtualenv_path)
@task
def remove_templates():
"""
Remove any files we have deployed from templates
"""
for template in util.get_templates().values():
remote_path = template["remote_path"]
if exists(remote_path):
system.sudo("rm %s" % remote_path)
@task
def git_push(rev=None):
"""
Push the local git repo to the remote hosts
"""
# push either the specified revision, or the default GIT_BRANCH as
# specified in settings.
if rev is None:
rev = env.git_branch
# ensure the project path exists and is an initialized git repo.
if not exists(env.project_root):
raise Exception(
"The project root is missing! Do you need to run the install() task?")
u = env.user
env.user = env.ssh_user
with cd(env.project_root):
system.run("git init")
system.run("git config --add receive.denyCurrentBranch ignore")
set_permissions()
remotes = get_git_remotes()
# deprecated; see function docstring
# define_local_git_ssh()
# Set up the remote host as a git remote in our local configuration
#
# WAT: By only executing this sequence if the host is not listed in the remotes,
# deployments will break if we change either the user or project root.
# Needs fixin.
h = env.host_string
if h not in remotes:
util.print_command("git remote add %s %s@%s:%s" % (h, env.user, h, env.project_root))
check_call(["git", "remote", "add", h, "%s@%s:%s" % (env.user, h, env.project_root)])
remotes = get_git_remotes()
# A little git jazz-hands here, to manage the push by first checking to see if
# the default branch exists in the remote repository.
pushed = False
if rev == env.git_branch:
# determine if the remote branch exists
with cd(env.project_root):
ret = system.run("git branch")
if rev not in ret:
util.print_command("git push %s %s" % (h, rev))
check_call(["git", "push", h, rev])
pushed = True
# The first push must create the master branch, so we must only specify the rev,
# not the source ref. Because git reasons...
if not pushed:
util.print_command("git push %s HEAD:%s" % (h, rev))
check_call(["git", "push", h, "HEAD:%s" % rev])
# ...but pushing into a branch on a remote that already exists will cause madness;
# we must ensure the working tree is in sync with the newly-pushed ref.
with cd(env.project_root):
system.run("git checkout %s" % rev)
system.run("git reset --hard") # weeeeee!
system.run("git submodule init")
system.run("git submodule update")
# fix up the permissions immediately after completing the push, so we don't
# try to interact with files we cannot read or modify.
set_permissions()
env.user = u
@task
def create():
"""
(re)create a virtualenv for a python project deployment
This function sets up the entire virtualenv, initializes the local git repo inside the project
root, and pushes up the local branch. If invoked when the local virtualenv already exists, it
will prompt for confirmation before destroying it, unless NO_PROMPTS is True in your settings.
"""
# Create virtualenv
system.sudo("mkdir -p %s" % env.virtualenv_home)
system.sudo("chown %s:staff %s" % (env.user, env.virtualenv_home))
# this bit also evolved from the mezzanine original. Seriously, what a
# great project.
with cd(env.virtualenv_home):
# remove the existing virtual environment and project root, if any.
if exists(env.project_name):
if not env.no_prompts:
prompt = raw_input("\nVirtualenv exists: %s\nWould you like "
"to replace it? (yes/no) " % env.project_name)
if prompt.lower() != "yes":
print "\nAborting!"
return False
remove()
# create the new virtualenv and project root
system.sudo("virtualenv %s" % env.project_name)
system.sudo("mkdir -p %s" % env.project_root)
set_permissions()
with cd(env.project_root):
# do the initial configuration of the git client, so that we can do our
# push unmolested.
if env.use_git:
system.sudo(
"su -l {0} -c \"git config --global user.email '{0}'\"".format(env.project_user))
system.sudo(
"su -l {0} -c \"git config --global user.name '{0}'\"".format(env.project_user))
system.sudo("su -l {0} -c \"git config --global receive.denyCurrentBranch ignore\"".format(
env.project_user
))
git_push()
else:
# WAT not sure I ever want to re-implement flat-file support, tbqh.
raise NotImplementedError(
"flat-file support not implemented at this time.")
#root = os.path.dirname(os.path.abspath(__file__))
#system.sudo("mkdir -p %s" % env.project_root)
# for target in env.upload_targets:
# put("%s/%s" %
# (root, target), env.project_root, use_sudo=True, mirror_local_mode=True)
@task
def install():
"""
Create the python virtualenv and deployment directories if necessary
A generic installation task that should be run at least once when deploying a new project,
since it covers a bunch of stuff that will be common to any python application deployment.
"""
create_user()
if not exists(env.virtualenv_home) or not exists(env.project_root):
create()
install_dependencies()
return True
return False
def set_permissions():
"""
Ensure that the project's entire virtualenv is owned by the project user,
that all directories are 2775, and that all files are writable by the group.
This allows the privileged fabric ssh user to modify these files during
deployment, but keeps everything nicely isolated when accessed from inside
the running application.
"""
system.sudo("chown -R %s:%s %s" % (env.project_user, env.project_group, env.virtualenv_path))
system.sudo("find %s -type d -exec chmod 2775 {} \\;" % env.virtualenv_path)
system.sudo("find %s -type f -exec chmod g+rw {} \\;" % env.virtualenv_path)
def define_local_git_ssh():
"""
Configure git to execute ssh with the correct identity file.
WAT: This is likely deprecated now that we correctly honor fab's command-line overrides,
so we should consider this for removal.
"""
if not env.key_filename:
return
sh = './fabric_ssh.sh'
# skip host IP checking, but only in staging.
ssh_opts = ""
if env.environment == 'staging':
ssh_opts = '-oCheckHostIP=no'
with open(sh, 'w') as f:
f.write('#!/bin/sh\n')
f.write('ssh -i %s %s $*' % (env.key_filename, ssh_opts))
os.chmod(sh, 0755)
os.environ.setdefault('GIT_SSH', sh)
def get_git_remotes():
"""
Return a dict of remotes in the current (local) git repo.
"""
remotes = {}
for line in check_output(["git", "remote", "-v"]).split("\n"):
# Sample output:
# foo.org fabric@foo.org:/websites/foo/project.git (push)
if line:
(name, url, op) = line.split()
if op == "(push)":
remotes[name] = url
return remotes
def create_user():
"""
(re)create the user and group under which the project should run
The project user is limited -- it should not have sudo access, nor should
it be a member of the staff group.
"""
# warnings only, since we accept more than the 0 exit status.
with settings(warn_only=True):
system.sudo("groupadd -f %s" % env.project_group)
result = system.sudo(
"useradd -g {1} -m -d /home/{0} -s /bin/bash {0}".format(
env.project_user,
env.project_group
)
)
if result.return_code not in [0, 9]:
print result
raise SystemExit()
system.run("mkdir -p /home/%s/.ssh" % env.project_user)
key = "keys/%s.pub" % env.project_user
if (os.path.exists(key)):
put(key, "/home/%s/.ssh/authorized_keys" % env.project_user)
system.run("chmod 600 /home/%s/.ssh/authorized_keys" % env.project_user)
else:
print "Warning: No public key found for user %s!" % env.project_user
system.run("chown -R {0}:{0} /home/{0}/.ssh".format(env.project_user))
system.run("chmod 700 /home/%s/.ssh" % env.project_user)
# We place the fabric ssh user in the project group, because the
# fabric user needs to be able to modify files owned by the user.
system.sudo("usermod -a -G {1} {0}".format(env.ssh_user, env.project_group))
@task
def pip(packages):
"""
Installs one or more Python packages within the virtual environment.
"""
return system.sudo("pip install %s" % packages)
@task
def install_dependencies():
"""
Install any missing or updated python modules listed in PIP_REQUIREMENTS_PATH
"""
with project(env):
for p in getattr(env, 'pip_requirements_path', []):
# skip any requirements file that doesn't exist on the remote host. This lets us
# ignore cotton's default requirements, if they're listed in the env.
#
# WAT Should we print a warning here? meh.
fn = env.project_root + '/' + p
if exists(fn):
pip("-r %s" % fn)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import logging
import os
import yaml
from toscaparser.elements.interfaces import InterfacesDef
from toscaparser.functions import GetInput
from toscaparser.nodetemplate import NodeTemplate
from toscaparser.utils.gettextutils import _
SECTIONS = (TYPE, PROPERTIES, MEDADATA, DEPENDS_ON, UPDATE_POLICY,
DELETION_POLICY) = \
('type', 'properties', 'metadata',
'depends_on', 'update_policy', 'deletion_policy')
HEAT_TEMPLATE_VERSION = '2013-05-23'
HEAT_DESCRIPTION = 'Scaling template'
policy_type = ['tosca.policies.Placement',
'tosca.policies.Scaling',
'tosca.policies.Scaling.Cluster',
'tosca.policies.Monitoring',
'tosca.policies.Reservation',
'tosca.policies.nfv.InstantiationLevels',
'tosca.policies.nfv.ScalingAspects',
'tosca.policies.nfv.VduInitialDelta',
'tosca.policies.nfv.VduInstantiationLevels',
'tosca.policies.nfv.VduScalingAspectDeltas',
'tosca.policies.nfv.VirtualLinkInstantiationLevels']
log = logging.getLogger('heat-translator')
class HotResource(object):
'''Base class for TOSCA node type translation to Heat resource type.'''
def __init__(self, nodetemplate, name=None, type=None, properties=None,
metadata=None, depends_on=None,
update_policy=None, deletion_policy=None, csar_dir=None):
log.debug(_('Translating TOSCA node type to HOT resource type.'))
self.nodetemplate = nodetemplate
if name:
self.name = name
else:
self.name = nodetemplate.name
self.type = type
self.properties = properties or {}
self.csar_dir = csar_dir
# special case for HOT softwareconfig
cwd = os.getcwd()
if type == 'OS::Heat::SoftwareConfig':
config = self.properties.get('config')
if isinstance(config, dict):
if self.csar_dir:
os.chdir(self.csar_dir)
implementation_artifact = os.path.abspath(config.get(
'get_file'))
else:
implementation_artifact = config.get('get_file')
if implementation_artifact:
filename, file_extension = os.path.splitext(
implementation_artifact)
file_extension = file_extension.lower()
# artifact_types should be read to find the exact script
# type, unfortunately artifact_types doesn't seem to be
# supported by the parser
if file_extension == '.ansible' \
or file_extension == '.yaml' \
or file_extension == '.yml':
self.properties['group'] = 'ansible'
if file_extension == '.pp':
self.properties['group'] = 'puppet'
if self.properties.get('group') is None:
self.properties['group'] = 'script'
os.chdir(cwd)
self.metadata = metadata
# The difference between depends_on and depends_on_nodes is
# that depends_on defines dependency in the context of the
# HOT template and it is used during the template output.
# Depends_on_nodes defines the direct dependency between the
# tosca nodes and is not used during the output of the
# HOT template but for internal processing only. When a tosca
# node depends on another node it will always be added to
# depends_on_nodes but not always to depends_on. For example
# if the source of dependency is a server, the dependency will
# be added as properties.get_resource and not depends_on
if depends_on:
self.depends_on = depends_on
self.depends_on_nodes = depends_on
else:
self.depends_on = []
self.depends_on_nodes = []
self.update_policy = update_policy
self.deletion_policy = deletion_policy
self.group_dependencies = {}
# if hide_resource is set to true, then this resource will not be
# generated in the output yaml.
self.hide_resource = False
def handle_properties(self):
# the property can hold a value or the intrinsic function get_input
# for value, copy it
# for get_input, convert to get_param
for prop in self.nodetemplate.get_properties_objects():
pass
def handle_life_cycle(self):
hot_resources = []
deploy_lookup = {}
# TODO(anyone): sequence for life cycle needs to cover different
# scenarios and cannot be fixed or hard coded here
operations_deploy_sequence = ['create', 'configure', 'start']
operations = HotResource.get_all_operations(self.nodetemplate)
# create HotResource for each operation used for deployment:
# create, start, configure
# ignore the other operations
# observe the order: create, start, configure
# use the current HotResource for the first operation in this order
# hold the original name since it will be changed during
# the transformation
node_name = self.name
reserve_current = 'NONE'
for operation in operations_deploy_sequence:
if operation in operations.keys():
reserve_current = operation
break
# create the set of SoftwareDeployment and SoftwareConfig for
# the interface operations
hosting_server = None
if self.nodetemplate.requirements is not None:
hosting_server = self._get_hosting_server()
sw_deployment_resource = HOTSoftwareDeploymentResources(hosting_server)
server_key = sw_deployment_resource.server_key
servers = sw_deployment_resource.servers
sw_deploy_res = sw_deployment_resource.software_deployment
# hosting_server is None if requirements is None
hosting_on_server = hosting_server if hosting_server else None
base_type = HotResource.get_base_type_str(
self.nodetemplate.type_definition)
# if we are on a compute node the host is self
if hosting_on_server is None and base_type == 'tosca.nodes.Compute':
hosting_on_server = self.name
servers = {'get_resource': self.name}
cwd = os.getcwd()
for operation in operations.values():
if operation.name in operations_deploy_sequence:
config_name = node_name + '_' + operation.name + '_config'
deploy_name = node_name + '_' + operation.name + '_deploy'
if self.csar_dir:
os.chdir(self.csar_dir)
get_file = os.path.abspath(operation.implementation)
else:
get_file = operation.implementation
hot_resources.append(
HotResource(self.nodetemplate,
config_name,
'OS::Heat::SoftwareConfig',
{'config':
{'get_file': get_file}},
csar_dir=self.csar_dir))
if operation.name == reserve_current and \
base_type != 'tosca.nodes.Compute':
deploy_resource = self
self.name = deploy_name
self.type = sw_deploy_res
self.properties = {'config': {'get_resource': config_name},
server_key: servers}
deploy_lookup[operation] = self
else:
sd_config = {'config': {'get_resource': config_name},
server_key: servers}
deploy_resource = \
HotResource(self.nodetemplate,
deploy_name,
sw_deploy_res,
sd_config, csar_dir=self.csar_dir)
hot_resources.append(deploy_resource)
deploy_lookup[operation] = deploy_resource
lifecycle_inputs = self._get_lifecycle_inputs(operation)
if lifecycle_inputs:
deploy_resource.properties['input_values'] = \
lifecycle_inputs
os.chdir(cwd)
# Add dependencies for the set of HOT resources in the sequence defined
# in operations_deploy_sequence
# TODO(anyone): find some better way to encode this implicit sequence
group = {}
op_index_min = None
op_index_max = -1
for op, hot in deploy_lookup.items():
# position to determine potential preceding nodes
op_index = operations_deploy_sequence.index(op.name)
if op_index_min is None or op_index < op_index_min:
op_index_min = op_index
if op_index > op_index_max:
op_index_max = op_index
for preceding_op_name in \
reversed(operations_deploy_sequence[:op_index]):
preceding_hot = deploy_lookup.get(
operations.get(preceding_op_name))
if preceding_hot:
hot.depends_on.append(preceding_hot)
hot.depends_on_nodes.append(preceding_hot)
group[preceding_hot] = hot
break
if op_index_max >= 0:
last_deploy = deploy_lookup.get(operations.get(
operations_deploy_sequence[op_index_max]))
else:
last_deploy = None
# save this dependency chain in the set of HOT resources
self.group_dependencies.update(group)
for hot in hot_resources:
hot.group_dependencies.update(group)
roles_deploy_resource = self._handle_ansiblegalaxy_roles(
hot_resources, node_name, servers)
# add a dependency to this ansible roles deploy to
# the first "classic" deploy generated for this node
if roles_deploy_resource and op_index_min:
first_deploy = deploy_lookup.get(operations.get(
operations_deploy_sequence[op_index_min]))
first_deploy.depends_on.append(roles_deploy_resource)
first_deploy.depends_on_nodes.append(roles_deploy_resource)
return hot_resources, deploy_lookup, last_deploy
def _handle_ansiblegalaxy_roles(self, hot_resources, initial_node_name,
hosting_on_server):
artifacts = self.get_all_artifacts(self.nodetemplate)
install_roles_script = ''
sw_deployment_resource = \
HOTSoftwareDeploymentResources(hosting_on_server)
server_key = sw_deployment_resource.server_key
sw_deploy_res = sw_deployment_resource.software_deployment
for artifact_name, artifact in artifacts.items():
artifact_type = artifact.get('type', '').lower()
if artifact_type == 'tosca.artifacts.ansiblegalaxy.role':
role = artifact.get('file', None)
if role:
install_roles_script += 'ansible-galaxy install ' + role \
+ '\n'
if install_roles_script:
# remove trailing \n
install_roles_script = install_roles_script[:-1]
# add shebang and | to use literal scalar type (for multiline)
install_roles_script = '|\n#!/bin/bash\n' + install_roles_script
config_name = initial_node_name + '_install_roles_config'
deploy_name = initial_node_name + '_install_roles_deploy'
hot_resources.append(
HotResource(self.nodetemplate, config_name,
'OS::Heat::SoftwareConfig',
{'config': install_roles_script},
csar_dir=self.csar_dir))
sd_config = {'config': {'get_resource': config_name},
server_key: hosting_on_server}
deploy_resource = \
HotResource(self.nodetemplate, deploy_name,
sw_deploy_res,
sd_config, csar_dir=self.csar_dir)
hot_resources.append(deploy_resource)
return deploy_resource
def handle_connectsto(self, tosca_source, tosca_target, hot_source,
hot_target, config_location, operation):
# The ConnectsTo relationship causes a configuration operation in
# the target.
# This hot resource is the software config portion in the HOT template
# This method adds the matching software deployment with the proper
# target server and dependency
if config_location == 'target':
hosting_server = hot_target._get_hosting_server()
hot_depends = hot_target
elif config_location == 'source':
hosting_server = self._get_hosting_server()
hot_depends = hot_source
sw_deployment_resource = HOTSoftwareDeploymentResources(hosting_server)
server_key = sw_deployment_resource.server_key
servers = sw_deployment_resource.servers
sw_deploy_res = sw_deployment_resource.software_deployment
deploy_name = tosca_source.name + '_' + tosca_target.name + \
'_connect_deploy'
sd_config = {'config': {'get_resource': self.name},
server_key: servers}
deploy_resource = \
HotResource(self.nodetemplate,
deploy_name,
sw_deploy_res,
sd_config,
depends_on=[hot_depends], csar_dir=self.csar_dir)
connect_inputs = self._get_connect_inputs(config_location, operation)
if connect_inputs:
deploy_resource.properties['input_values'] = connect_inputs
return deploy_resource
def handle_expansion(self):
pass
def handle_hosting(self):
# handle hosting server for the OS:HEAT::SoftwareDeployment
# from the TOSCA nodetemplate, traverse the relationship chain
# down to the server
sw_deploy_group = \
HOTSoftwareDeploymentResources.HOT_SW_DEPLOYMENT_GROUP_RESOURCE
sw_deploy = HOTSoftwareDeploymentResources.HOT_SW_DEPLOYMENT_RESOURCE
if self.properties.get('servers') and \
self.properties.get('server'):
del self.properties['server']
if self.type == sw_deploy_group or self.type == sw_deploy:
# skip if already have hosting
# If type is NodeTemplate, look up corresponding HotResource
host_server = self.properties.get('servers') \
or self.properties.get('server')
if host_server is None:
raise Exception(_("Internal Error: expecting host "
"in software deployment"))
elif isinstance(host_server.get('get_resource'), NodeTemplate):
self.properties['server']['get_resource'] = \
host_server['get_resource'].name
elif isinstance(host_server, dict) and \
not host_server.get('get_resource'):
self.properties['servers'] = \
host_server
def top_of_chain(self):
dependent = self.group_dependencies.get(self)
if dependent is None:
return self
else:
return dependent.top_of_chain()
# this function allows providing substacks as external files;
# those files will be dumped alongside the output file.
#
# returns a dict of filename -> content
def extract_substack_templates(self, base_filename, hot_template_version):
return {}
# this function asks the resource to embed substacks
# into the main template, if any.
# this is used when the final output is stdout
def embed_substack_templates(self, hot_template_version):
pass
def get_dict_output(self):
resource_sections = OrderedDict()
resource_sections[TYPE] = self.type
if self.properties:
resource_sections[PROPERTIES] = self.properties
if self.metadata:
resource_sections[MEDADATA] = self.metadata
if self.depends_on:
resource_sections[DEPENDS_ON] = []
for depend in self.depends_on:
resource_sections[DEPENDS_ON].append(depend.name)
if self.update_policy:
resource_sections[UPDATE_POLICY] = self.update_policy
if self.deletion_policy:
resource_sections[DELETION_POLICY] = self.deletion_policy
return {self.name: resource_sections}
def _get_lifecycle_inputs(self, operation):
# check if this lifecycle operation has input values specified
# extract and convert to HOT format
if isinstance(operation.value, str):
# the operation has a static string
return {}
# the operation is a dict {'implementation': xxx, 'inputs': yyy}
inputs = operation.value.get('inputs')
deploy_inputs = {}
if inputs:
for name, value in inputs.items():
deploy_inputs[name] = value
return deploy_inputs
def _get_connect_inputs(self, config_location, operation):
if config_location == 'target':
inputs = operation.get('pre_configure_target').get('inputs')
elif config_location == 'source':
inputs = operation.get('pre_configure_source').get('inputs')
deploy_inputs = {}
if inputs:
for name, value in inputs.items():
deploy_inputs[name] = value
return deploy_inputs
def _get_hosting_server(self, node_template=None):
# find the server that hosts this software by checking the
# requirements and following the hosting chain
hosting_servers = []
host_exists = False
this_node_template = self.nodetemplate \
if node_template is None else node_template
for requirement in this_node_template.requirements:
for requirement_name, assignment in requirement.items():
for check_node in this_node_template.related_nodes:
# check if the capability is Container
if isinstance(assignment, dict):
node_name = assignment.get('node')
else:
node_name = assignment
if node_name and node_name == check_node.name:
if self._is_container_type(requirement_name,
check_node):
hosting_servers.append(check_node.name)
host_exists = True
elif check_node.related_nodes and not host_exists:
return self._get_hosting_server(check_node)
if hosting_servers:
return hosting_servers
return None
def _is_container_type(self, requirement_name, node):
# capability is a list of dict
# For now just check if it's type tosca.nodes.Compute
# TODO(anyone): match up requirement and capability
base_type = HotResource.get_base_type_str(node.type_definition)
if base_type == 'tosca.nodes.Compute':
return True
else:
return False
def get_hot_attribute(self, attribute, args):
# this is a placeholder and should be implemented by the subclass
# if translation is needed for the particular attribute
raise Exception(_("No translation in TOSCA type {0} for attribute "
"{1}").format(self.nodetemplate.type, attribute))
def get_tosca_props(self):
tosca_props = {}
for prop in self.nodetemplate.get_properties_objects():
if isinstance(prop.value, GetInput):
tosca_props[prop.name] = {'get_param': prop.value.input_name}
else:
tosca_props[prop.name] = prop.value
return tosca_props
def represent_ordereddict(self, dumper, data):
nodes = []
for key, value in data.items():
node_key = dumper.represent_data(key)
node_value = dumper.represent_data(value)
nodes.append((node_key, node_value))
return yaml.nodes.MappingNode('tag:yaml.org,2002:map', nodes)
def _handle_nested_template(self, scale_res, yaml_name,
hot_template_parameters,
parameters=None):
template_dict = OrderedDict()
template_dict['heat_template_version'] = HEAT_TEMPLATE_VERSION
template_dict['description'] = HEAT_DESCRIPTION
if parameters is not None:
template_dict['parameters'] = parameters
if hot_template_parameters:
all_params = OrderedDict()
for parameter in hot_template_parameters:
all_params.update(parameter.get_dict_output())
template_dict.update({'parameters': all_params})
template_dict["resources"] = {}
dict_res = OrderedDict()
for res in scale_res:
dict_res = res.get_dict_output()
res_name = list(dict_res.keys())[0]
template_dict["resources"][res_name] = \
dict_res[res_name]
yaml.add_representer(OrderedDict, self.represent_ordereddict)
yaml.add_representer(dict, self.represent_ordereddict)
yaml_string = yaml.dump(template_dict, default_flow_style=False)
yaml_string = yaml_string.replace('\'', '').replace('\n\n', '\n')
nested_template = {
yaml_name: yaml_string
}
return nested_template
def remove_depends_on(self, depends_on_set):
        # Remove dependencies whose relationship is derived from any type
        # in depends_on_set.
for rel, node in self.nodetemplate.relationships.items():
for do in depends_on_set:
if rel.is_derived_from(do):
for hot_resource in self.depends_on_nodes:
if node.name == hot_resource.name and \
hot_resource in self.depends_on:
self.depends_on.remove(hot_resource)
break
@staticmethod
def get_all_artifacts(nodetemplate):
# workaround bug in the parser
base_type = HotResource.get_base_type_str(nodetemplate.type_definition)
if base_type in policy_type:
artifacts = {}
else:
artifacts = nodetemplate.type_definition.get_value('artifacts',
parent=True)
if not artifacts:
artifacts = {}
tpl_artifacts = nodetemplate.entity_tpl.get('artifacts')
if tpl_artifacts:
artifacts.update(tpl_artifacts)
return artifacts
@staticmethod
def get_all_operations(node):
operations = {}
for operation in node.interfaces:
operations[operation.name] = operation
# workaround bug in the parser
base_type = HotResource.get_base_type_str(node.type_definition)
if base_type in policy_type:
return operations
node_type = node.type_definition
while True:
type_operations = HotResource._get_interface_operations_from_type(
node_type, node, 'Standard')
type_operations.update(operations)
operations = type_operations
if node_type.parent_type is not None:
node_type = node_type.parent_type
else:
return operations
@staticmethod
def _get_interface_operations_from_type(node_type, node, lifecycle_name):
operations = {}
base_type = HotResource.get_base_type_str(node_type)
if base_type in policy_type:
return operations
if node_type.interfaces and lifecycle_name in node_type.interfaces:
for name, elems in node_type.interfaces[lifecycle_name].items():
# ignore empty operations (only type)
# ignore global interface inputs,
# concrete inputs are on the operations themselves
if name != 'type' and name != 'inputs':
operations[name] = InterfacesDef(node_type,
lifecycle_name,
node, name, elems)
return operations
@staticmethod
def get_base_type_str(node_type):
if isinstance(node_type, str):
return node_type
if node_type.parent_type is not None:
parent_type_str = None
if isinstance(node_type.parent_type, str):
parent_type_str = node_type.parent_type
else:
parent_type_str = node_type.parent_type.type
if parent_type_str and parent_type_str.endswith('.Root'):
return node_type.type
return HotResource.get_base_type_str(node_type.parent_type)
return node_type.type
class HOTSoftwareDeploymentResources(object):
"""Provides HOT Software Deployment resources
SoftwareDeployment or SoftwareDeploymentGroup Resource
"""
HOT_SW_DEPLOYMENT_RESOURCE = 'OS::Heat::SoftwareDeployment'
HOT_SW_DEPLOYMENT_GROUP_RESOURCE = 'OS::Heat::SoftwareDeploymentGroup'
def __init__(self, hosting_server=None):
self.software_deployment = self.HOT_SW_DEPLOYMENT_RESOURCE
self.software_deployment_group = self.HOT_SW_DEPLOYMENT_GROUP_RESOURCE
self.server_key = 'server'
self.hosting_server = hosting_server
self.servers = {}
if hosting_server is not None:
if len(self.hosting_server) == 1:
if isinstance(hosting_server, list):
self.servers['get_resource'] = self.hosting_server[0]
else:
for server in self.hosting_server:
self.servers[server] = {'get_resource': server}
self.software_deployment = self.software_deployment_group
self.server_key = 'servers'
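# Minimal usage sketch (hypothetical server names, not part of the translator
# API): one hosting server keeps the plain SoftwareDeployment resource, while
# several hosting servers switch to SoftwareDeploymentGroup and the 'servers'
# key.
if __name__ == '__main__':
    single = HOTSoftwareDeploymentResources(hosting_server=['web_server'])
    assert single.software_deployment == 'OS::Heat::SoftwareDeployment'
    assert single.servers == {'get_resource': 'web_server'}
    group = HOTSoftwareDeploymentResources(
        hosting_server=['web_server_1', 'web_server_2'])
    assert group.software_deployment == 'OS::Heat::SoftwareDeploymentGroup'
    assert group.server_key == 'servers'
    assert group.servers == {'web_server_1': {'get_resource': 'web_server_1'},
                             'web_server_2': {'get_resource': 'web_server_2'}}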
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import logging
import numpy
import pandas
import pyorganism
import pyorganism.regulation as pyreg
from itertools import izip
from fuzzywuzzy import fuzz
from pyorganism.io.microarray import read_microarray
LOGGER = logging.getLogger()
LOGGER.addHandler(logging.StreamHandler())
LOGGER.setLevel(logging.INFO)
VERSION = "default"
single_dir = "Expression/LZ41-LZ54_single_knockouts"
double_dir = "Expression/LZ41-LZ54_double_knockouts"
CONFIG = dict(
continuous=False,
data_paths=[
(os.path.join(single_dir, "LZ41-LZ54.tsv"), "wt"),
(os.path.join(single_dir, "LZ41_d_fis-LZ54_d_fis.tsv"), "fis"),
(os.path.join(single_dir, "LZ41_d_hns-LZ54_d_hns.tsv"), "hns"),
(os.path.join(single_dir, "LZ41-LZ41_d_fis.tsv"), "wt-fis-low"),
(os.path.join(single_dir, "LZ54-LZ54_d_fis.tsv"), "wt-fis-high"),
(os.path.join(single_dir, "LZ41-LZ41_d_hns.tsv"), "wt-hns-low"),
(os.path.join(single_dir, "LZ54-LZ54_d_hns.tsv"), "wt-hns-high"),
(os.path.join(double_dir, "LZ41_d_fis_hns-LZ54_d_fis_hns.tsv"), "fis-hns"),
(os.path.join(double_dir, "LZ41-LZ41_d_fis_hns.tsv"), "wt-fis-hns-low"),
(os.path.join(double_dir, "LZ54-LZ54_d_fis_hns.tsv"),
"wt-fis-hns-high"),
],
data_load=[
read_microarray,
read_microarray,
read_microarray,
read_microarray,
read_microarray,
read_microarray,
read_microarray,
read_microarray,
read_microarray,
read_microarray
],
threshold=80
)
def compile_names(experiment_paths, loading_funcs):
LOGGER.info("Loading discrete expression data")
data_frames = [load_data(path)\
for ((path, name), load_data) in izip(experiment_paths, loading_funcs)]
return pandas.concat(data_frames, ignore_index=True)
def compile_feature_map(genes, features, gene_finder):
feature2gene = dict()
gap = list()
for name in features:
try:
feature2gene[name] = gene_finder(name)
except IndexError:
gap.append(name)
LOGGER.info("Found %d/%d (%3.2f%%)", len(feature2gene), len(features),
100.0 * len(feature2gene) / len(features))
return (feature2gene, gap)
def extend_feature_map(feature2gene, gap, gene_finder):
found = list()
for name in gap:
try:
feature2gene[name] = gene_finder(name)
found.append(name)
except IndexError:
continue
gap = set(gap).difference(set(found))
LOGGER.info("Found %d additional genes", len(found))
return gap
def fuzzy_extension(feature2gene, gap, gene_finder, thresh=80):
found = list()
for name in gap:
try:
(gene, match, score) = gene_finder.fuzzy_search(name,
threshold=thresh, scorer=fuzz.QRatio)
feature2gene[name] = gene
found.append(name)
except IndexError:
LOGGER.debug("'%s' not found", name)
feature2gene[name] = None
gap = set(gap).difference(set(found))
LOGGER.info("Found %d additional genes", len(found))
return gap
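# Illustrative sketch of the fuzzy fallback above (hypothetical feature names,
# not used by the pipeline): fuzz.QRatio lower-cases and strips
# non-alphanumeric characters before scoring, so spelling variants of a gene
# name clear the default 80% threshold while unrelated names do not.
def _demo_fuzzy_threshold(thresh=80):
    assert fuzz.QRatio("dnaA", "dnaa") >= thresh
    assert fuzz.QRatio("dnaA", "xylQ") < thresh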
def synonym_finder(genes):
targets = list()
indeces = list()
for (i, gene) in enumerate(genes):
if gene.bnumber:
targets.append(gene.bnumber)
indeces.append(i)
for name in gene.synonyms:
targets.append(name)
indeces.append(i)
# if gene.product:
# targets.append(gene.product.name)
# indeces.append(i)
# for name in gene.product.synonyms:
# targets.append(name)
# indeces.append(i)
# if gene.regulatory_product:
# targets.append(gene.regulatory_product.name)
# indeces.append(i)
# for name in gene.regulatory_product.synonyms:
# targets.append(name)
# indeces.append(i)
return pyorganism.FindObject(genes, targets=targets, indeces=indeces)
def manual_name_updates(name2gene):
# [b1500](http://regulondb.ccg.unam.mx/gene?term=ECK120003379&organism=ECK12&format=jsp&type=gene)
gene = pyreg.Gene.get("ECK120003379", None, VERSION)
name2gene["b1500"] = gene
# [b0609](http://regulondb.ccg.unam.mx/gene?term=ECK120002943&organism=ECK12&format=jsp&type=gene)
gene = pyreg.Gene.get("ECK120002943", None, VERSION)
name2gene["b0609"] = gene
# [b0332](http://www.ncbi.nlm.nih.gov/gene/?term=944992) is not recorded in RegulonDB.
# [b3975](http://regulondb.ccg.unam.mx/gene?term=G7818&type=gene&format=jsp)
gene = pyreg.Gene.get("ECK120004318", None, VERSION)
name2gene["b3975"] = gene
# [b2084](http://regulondb.ccg.unam.mx/gene?term=G7121&type=gene&format=jsp)
gene = pyreg.Gene.get("ECK120003706", None, VERSION)
name2gene["b2084"] = gene
# [b0322](http://regulondb.ccg.unam.mx/gene?term=G0-10550&type=gene&format=jsp)
gene = pyreg.Gene.get("ECK120026444", None, VERSION)
name2gene["b0322"] = gene
# [b0309](http://regulondb.ccg.unam.mx/gene?term=ECK120002798&organism=ECK12&format=jsp&type=gene)
gene = pyreg.Gene.get("ECK120002798", None, VERSION)
name2gene["b0309"] = gene
# [b2596](http://regulondb.ccg.unam.mx/gene?term=G7353&type=gene&format=jsp)
gene = pyreg.Gene.get("ECK120003922", None, VERSION)
name2gene["b2596"] = gene
# [b1364](http://regulondb.ccg.unam.mx/gene?term=ECK120003282&organism=ECK12&format=jsp&type=gene)
gene = pyreg.Gene.get("ECK120003282", None, VERSION)
name2gene["b1364"] = gene
    # [b4091](http://porteco.org/AjaxSearch.jsp?searchString=b4091) is not recorded in RegulonDB.
    # [ycdF](http://regulondb.ccg.unam.mx/gene?term=ECK120003116&organism=ECK12&format=jsp&type=gene)
    gene = pyreg.Gene.get("ECK120003116", None, VERSION)
    name2gene["ycdF"] = gene
# [b1903](http://regulondb.ccg.unam.mx/gene?term=G7034&type=gene&format=jsp)
gene = pyreg.Gene.get("ECK120003620", None, VERSION)
name2gene["b1903"] = gene
# [b0501](http://regulondb.ccg.unam.mx/gene?term=G6272&type=gene&format=jsp)
gene = pyreg.Gene.get("ECK120002880", None, VERSION)
name2gene["b0501"] = gene
# [b0165](http://regulondb.ccg.unam.mx/gene?term=ECK120002711&organism=ECK12&format=jsp&type=gene)
gene = pyreg.Gene.get("ECK120002711", None, VERSION)
name2gene["b0165"] = gene
# [rhsC_1](http://regulondb.ccg.unam.mx/gene?term=ECK120000839&organism=ECK12&format=jsp&type=gene)
gene = pyreg.Gene.get("ECK120000839", None, VERSION)
name2gene["rhsC_1"] = gene
# [rhsC_2](http://regulondb.ccg.unam.mx/gene?term=ECK120000839&organism=ECK12&format=jsp&type=gene)
gene = pyreg.Gene.get("ECK120000839", None, VERSION)
name2gene["rhsC_2"] = gene
# [b1354](http://regulondb.ccg.unam.mx/gene?term=G6678&type=gene&format=jsp)
gene = pyreg.Gene.get("ECK120003273", None, VERSION)
name2gene["b1354"] = gene
# [b2651](http://regulondb.ccg.unam.mx/gene?term=ECK120003954&organism=ECK12&format=jsp&type=gene)
gene = pyreg.Gene.get("ECK120003954", None, VERSION)
name2gene["b2651"] = gene
# [b1052](http://regulondb.ccg.unam.mx/gene?term=ECK120003149&organism=ECK12&format=jsp&type=gene)
gene = pyreg.Gene.get("ECK120003149", None, VERSION)
name2gene["b1052"] = gene
# [b0395](http://www.ncbi.nlm.nih.gov/gene/?term=949074) is not recorded in RegulonDB.
# [b0725](http://regulondb.ccg.unam.mx/gene?term=ECK120002988&organism=ECK12&format=jsp&type=gene)
gene = pyreg.Gene.get("ECK120002988", None, VERSION)
name2gene["b0725"] = gene
# [b3837](http://porteco.org/AjaxSearch.jsp?searchString=b3837) is not recorded in RegulonDB.
# [b3007](http://regulondb.ccg.unam.mx/gene?term=G7562&type=gene&format=jsp)
gene = pyreg.Gene.get("ECK120004121", None, VERSION)
name2gene["b3007"] = gene
# [b3004](http://regulondb.ccg.unam.mx/gene?term=G7561&type=gene&format=jsp)
gene = pyreg.Gene.get("ECK120004120", None, VERSION)
name2gene["b3004"] = gene
def verify_union(name2gene, blattner2gene, full_data):
conflicts = 0
matched = 0
pairs = set(full_data[["name", "blattner"]].itertuples(index=False))
for (name, blattner) in pairs:
name_gene = name2gene.get(name)
blattner_gene = blattner2gene.get(blattner)
if name_gene or blattner_gene:
matched += 1
if blattner_gene and name_gene and name_gene.unique_id != blattner_gene.unique_id:
LOGGER.warn("conflict for %s: %s and %s: %s", name,
name_gene.unique_id, blattner, blattner_gene.unique_id)
conflicts += 1
LOGGER.info("Using the union of the maps %d genes were found (%3.2f%%)"\
" involving %d conflicts", matched,
100.0 * matched / len(pairs), conflicts)
def compile_name2gene(objects_path):
LOGGER.info("{0:*^78s}".format("Compile Gene Name Map"))
full_data = compile_names(CONFIG["data_paths"], CONFIG["data_load"])
version = os.path.basename(objects_path)
if not version:
version = os.path.basename(os.path.dirname(objects_path))
global VERSION
VERSION = version
LOGGER.info("{0:*^78s}".format(version))
LOGGER.info("Loading genes")
genes = pyorganism.read_pickle(os.path.join(objects_path, "genes.pkl"))
LOGGER.info("Finding gene names")
names = set(full_data["name"].unique())
if numpy.nan in names:
names.remove(numpy.nan)
names = sorted(names)
finder = pyorganism.FindObject(genes, "name")
(name2gene, name_gap) = compile_feature_map(genes, names, finder)
synonyms = synonym_finder(genes)
LOGGER.info("Finding gene names by synonyms")
name_gap = extend_feature_map(name2gene, name_gap, synonyms)
LOGGER.info("Missing %d gene names", len(name_gap))
LOGGER.info("Fuzzy search of gene names (threshold %d%%)", CONFIG["threshold"])
name_gap = fuzzy_extension(name2gene, name_gap, finder, CONFIG["threshold"])
# LOGGER.info("Fuzzy search of gene names by synonyms (threshold %d%%)", CONFIG["threshold"])
# name_gap = fuzzy_extension(name2gene, name_gap, synonyms, CONFIG["threshold"])
manual_name_updates(name2gene)
num = sum(1 for gene in name2gene.itervalues() if gene)
LOGGER.info("Final map contains %d names and %d genes (%3.2f%%)", len(name2gene),
num, 100.0 * num / len(name2gene))
LOGGER.info("Finding gene blattner numbers")
bnumbers = set(full_data["blattner"].unique())
if numpy.nan in bnumbers:
bnumbers.remove(numpy.nan)
bnumbers = sorted(bnumbers)
gene_finder = pyorganism.FindObject(genes, "bnumber")
(blattner2gene, blattner_gap) = compile_feature_map(genes, bnumbers, gene_finder)
LOGGER.info("Finding gene blattner numbers by synonyms")
blattner_gap = extend_feature_map(blattner2gene, blattner_gap, synonyms)
LOGGER.info("Missing %d gene blattner numbers", len(blattner_gap))
LOGGER.info("Fuzzy search of gene blattner numbers (threshold %d%%)", CONFIG["threshold"])
    blattner_gap = fuzzy_extension(blattner2gene, blattner_gap, gene_finder, CONFIG["threshold"])
# LOGGER.info("Fuzzy search of gene blattner numbers by synonyms (threshold %d%%)", CONFIG["threshold"])
# blattner_gap = fuzzy_extension(blattner2gene, blattner_gap, synonyms, CONFIG["threshold"])
num = sum(1 for gene in blattner2gene.itervalues() if gene)
LOGGER.info("Final map contains %d blattner numbers and %d genes (%3.2f%%)",
len(blattner2gene), num, 100.0 * num / len(blattner2gene))
verify_union(name2gene, blattner2gene, full_data)
pyorganism.write_pickle(name2gene, os.path.join(objects_path, "name2gene.pkl"))
pyorganism.write_pickle(blattner2gene, os.path.join(objects_path, "blattner2gene.pkl"))
if __name__ == "__main__":
if len(sys.argv) != 2:
LOGGER.critical("%s <RegulonDB objects path>", sys.argv[0])
sys.exit(2)
else:
compile_name2gene(sys.argv[1])
|
|
from __future__ import print_function, division
from .str import StrPrinter
from sympy.utilities import default_sort_key
class LambdaPrinter(StrPrinter):
"""
This printer converts expressions into strings that can be used by
lambdify.
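    A small illustrative example::
        >>> from sympy.abc import x
        >>> from sympy.printing.lambdarepr import lambdarepr
        >>> lambdarepr(x**2 + 1)
        'x**2 + 1'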
"""
def _print_MatrixBase(self, expr):
return "%s(%s)" % (expr.__class__.__name__,
self._print((expr.tolist())))
_print_SparseMatrix = \
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
_print_MatrixBase
def _print_Piecewise(self, expr):
result = []
i = 0
for arg in expr.args:
e = arg.expr
c = arg.cond
result.append('((')
result.append(self._print(e))
result.append(') if (')
result.append(self._print(c))
result.append(') else (')
i += 1
result = result[:-1]
result.append(') else None)')
result.append(')'*(2*i - 2))
return ''.join(result)
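    # For example, Piecewise((x, x > 0), (0, True)) renders as a chain of
    # nested Python conditional expressions ending in 'else None', e.g.
    # '((x) if (x > 0) else (((0) if (True) else None)))' (an illustrative
    # rendering; the trailing ')'*(2*i - 2) balances the opening brackets).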
def _print_Sum(self, expr):
loops = (
'for {i} in range({a}, {b}+1)'.format(
i=self._print(i),
a=self._print(a),
b=self._print(b))
for i, a, b in expr.limits)
return '(builtins.sum({function} {loops}))'.format(
function=self._print(expr.function),
loops=' '.join(loops))
def _print_And(self, expr):
result = ['(']
for arg in sorted(expr.args, key=default_sort_key):
result.extend(['(', self._print(arg), ')'])
result.append(' and ')
result = result[:-1]
result.append(')')
return ''.join(result)
def _print_Or(self, expr):
result = ['(']
for arg in sorted(expr.args, key=default_sort_key):
result.extend(['(', self._print(arg), ')'])
result.append(' or ')
result = result[:-1]
result.append(')')
return ''.join(result)
def _print_Not(self, expr):
result = ['(', 'not (', self._print(expr.args[0]), '))']
return ''.join(result)
def _print_BooleanTrue(self, expr):
return "True"
def _print_BooleanFalse(self, expr):
return "False"
def _print_ITE(self, expr):
result = [
'((', self._print(expr.args[1]),
') if (', self._print(expr.args[0]),
') else (', self._print(expr.args[2]), '))'
]
return ''.join(result)
class TensorflowPrinter(LambdaPrinter):
"""
Tensorflow printer which handles vectorized piecewise functions,
logical operators, max/min, and relational operators.
"""
def _print_And(self, expr):
"Logical And printer"
# We have to override LambdaPrinter because it uses Python 'and' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_and' to TENSORFLOW_TRANSLATIONS.
return '{0}({1})'.format('logical_and', ','.join(self._print(i) for i in expr.args))
def _print_Or(self, expr):
"Logical Or printer"
# We have to override LambdaPrinter because it uses Python 'or' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_or' to TENSORFLOW_TRANSLATIONS.
return '{0}({1})'.format('logical_or', ','.join(self._print(i) for i in expr.args))
def _print_Not(self, expr):
"Logical Not printer"
# We have to override LambdaPrinter because it uses Python 'not' keyword.
# If LambdaPrinter didn't define it, we would still have to define our
# own because StrPrinter doesn't define it.
return '{0}({1})'.format('logical_not', ','.join(self._print(i) for i in expr.args))
def _print_Min(self, expr, **kwargs):
from sympy import Min
if len(expr.args) == 1:
return self._print(expr.args[0], **kwargs)
return 'minimum({0}, {1})'.format(
self._print(expr.args[0], **kwargs),
self._print(Min(*expr.args[1:]), **kwargs))
def _print_Max(self, expr, **kwargs):
from sympy import Max
if len(expr.args) == 1:
return self._print(expr.args[0], **kwargs)
return 'maximum({0}, {1})'.format(
self._print(expr.args[0], **kwargs),
self._print(Max(*expr.args[1:]), **kwargs))
def _print_Piecewise(self, expr, **kwargs):
from sympy import Piecewise
e, cond = expr.args[0].args
if len(expr.args) == 1:
return 'select({0}, {1}, {2})'.format(
self._print(cond, **kwargs),
self._print(e, **kwargs),
0)
return 'select({0}, {1}, {2})'.format(
self._print(cond, **kwargs),
self._print(e, **kwargs),
self._print(Piecewise(*expr.args[1:]), **kwargs))
def _print_Relational(self, expr):
"Relational printer for Equality and Unequality"
op = {
'==' :'equal',
'!=' :'not_equal',
'<' :'less',
'<=' :'less_equal',
'>' :'greater',
'>=' :'greater_equal',
}
if expr.rel_op in op:
lhs = self._print(expr.lhs)
rhs = self._print(expr.rhs)
return '{op}({lhs}, {rhs})'.format(op=op[expr.rel_op],
lhs=lhs,
rhs=rhs)
return super(TensorflowPrinter, self)._print_Relational(expr)
class NumPyPrinter(LambdaPrinter):
"""
Numpy printer which handles vectorized piecewise functions,
logical operators, etc.
"""
_default_settings = {
"order": "none",
"full_prec": "auto",
}
def _print_seq(self, seq, delimiter=', '):
"General sequence printer: converts to tuple"
# Print tuples here instead of lists because numba supports
# tuples in nopython mode.
return '({},)'.format(delimiter.join(self._print(item) for item in seq))
def _print_MatMul(self, expr):
"Matrix multiplication printer"
return '({0})'.format(').dot('.join(self._print(i) for i in expr.args))
def _print_DotProduct(self, expr):
# DotProduct allows any shape order, but numpy.dot does matrix
# multiplication, so we have to make sure it gets 1 x n by n x 1.
arg1, arg2 = expr.args
if arg1.shape[0] != 1:
arg1 = arg1.T
if arg2.shape[1] != 1:
arg2 = arg2.T
return "dot(%s, %s)" % (self._print(arg1), self._print(arg2))
def _print_Piecewise(self, expr):
"Piecewise function printer"
exprs = '[{0}]'.format(','.join(self._print(arg.expr) for arg in expr.args))
conds = '[{0}]'.format(','.join(self._print(arg.cond) for arg in expr.args))
# If [default_value, True] is a (expr, cond) sequence in a Piecewise object
# it will behave the same as passing the 'default' kwarg to select()
# *as long as* it is the last element in expr.args.
# If this is not the case, it may be triggered prematurely.
return 'select({0}, {1}, default=nan)'.format(conds, exprs)
def _print_Relational(self, expr):
"Relational printer for Equality and Unequality"
op = {
'==' :'equal',
'!=' :'not_equal',
'<' :'less',
'<=' :'less_equal',
'>' :'greater',
'>=' :'greater_equal',
}
if expr.rel_op in op:
lhs = self._print(expr.lhs)
rhs = self._print(expr.rhs)
return '{op}({lhs}, {rhs})'.format(op=op[expr.rel_op],
lhs=lhs,
rhs=rhs)
return super(NumPyPrinter, self)._print_Relational(expr)
def _print_And(self, expr):
"Logical And printer"
# We have to override LambdaPrinter because it uses Python 'and' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_and' to NUMPY_TRANSLATIONS.
return '{0}({1})'.format('logical_and', ','.join(self._print(i) for i in expr.args))
def _print_Or(self, expr):
"Logical Or printer"
# We have to override LambdaPrinter because it uses Python 'or' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_or' to NUMPY_TRANSLATIONS.
return '{0}({1})'.format('logical_or', ','.join(self._print(i) for i in expr.args))
def _print_Not(self, expr):
"Logical Not printer"
# We have to override LambdaPrinter because it uses Python 'not' keyword.
# If LambdaPrinter didn't define it, we would still have to define our
# own because StrPrinter doesn't define it.
return '{0}({1})'.format('logical_not', ','.join(self._print(i) for i in expr.args))
def _print_Min(self, expr):
return '{0}(({1}))'.format('amin', ','.join(self._print(i) for i in expr.args))
def _print_Max(self, expr):
return '{0}(({1}))'.format('amax', ','.join(self._print(i) for i in expr.args))
# numexpr works by altering the string passed to numexpr.evaluate
# rather than by populating a namespace. Thus a special printer...
class NumExprPrinter(LambdaPrinter):
# key, value pairs correspond to sympy name and numexpr name
# functions not appearing in this dict will raise a TypeError
_numexpr_functions = {
'sin' : 'sin',
'cos' : 'cos',
'tan' : 'tan',
'asin': 'arcsin',
'acos': 'arccos',
'atan': 'arctan',
'atan2' : 'arctan2',
'sinh' : 'sinh',
'cosh' : 'cosh',
'tanh' : 'tanh',
'asinh': 'arcsinh',
'acosh': 'arccosh',
'atanh': 'arctanh',
'ln' : 'log',
'log': 'log',
'exp': 'exp',
'sqrt' : 'sqrt',
'Abs' : 'abs',
'conjugate' : 'conj',
'im' : 'imag',
're' : 'real',
'where' : 'where',
'complex' : 'complex',
'contains' : 'contains',
}
def _print_ImaginaryUnit(self, expr):
return '1j'
def _print_seq(self, seq, delimiter=', '):
# simplified _print_seq taken from pretty.py
s = [self._print(item) for item in seq]
if s:
return delimiter.join(s)
else:
return ""
def _print_Function(self, e):
func_name = e.func.__name__
nstr = self._numexpr_functions.get(func_name, None)
if nstr is None:
# check for implemented_function
if hasattr(e, '_imp_'):
return "(%s)" % self._print(e._imp_(*e.args))
else:
raise TypeError("numexpr does not support function '%s'" %
func_name)
return "%s(%s)" % (nstr, self._print_seq(e.args))
def blacklisted(self, expr):
raise TypeError("numexpr cannot be used with %s" %
expr.__class__.__name__)
# blacklist all Matrix printing
_print_SparseMatrix = \
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
blacklisted
# blacklist some python expressions
_print_list = \
_print_tuple = \
_print_Tuple = \
_print_dict = \
_print_Dict = \
blacklisted
def doprint(self, expr):
lstr = super(NumExprPrinter, self).doprint(expr)
return "evaluate('%s', truediv=True)" % lstr
class MpmathPrinter(LambdaPrinter):
"""
Lambda printer for mpmath which maintains precision for floats
"""
def _print_Float(self, e):
# XXX: This does not handle setting mpmath.mp.dps. It is assumed that
# the caller of the lambdified function will have set it to sufficient
# precision to match the Floats in the expression.
# Remove 'mpz' if gmpy is installed.
args = str(tuple(map(int, e._mpf_)))
return 'mpf(%s)' % args
def lambdarepr(expr, **settings):
"""
Returns a string usable for lambdifying.
"""
return LambdaPrinter(settings).doprint(expr)
|
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upload and download support for apitools."""
from __future__ import print_function
import email.generator as email_generator
import email.mime.multipart as mime_multipart
import email.mime.nonmultipart as mime_nonmultipart
import io
import json
import mimetypes
import os
import threading
import six
from six.moves import http_client
from apitools.base.py import buffered_stream
from apitools.base.py import compression
from apitools.base.py import exceptions
from apitools.base.py import http_wrapper
from apitools.base.py import stream_slice
from apitools.base.py import util
__all__ = [
'Download',
'Upload',
'RESUMABLE_UPLOAD',
'SIMPLE_UPLOAD',
'DownloadProgressPrinter',
'DownloadCompletePrinter',
'UploadProgressPrinter',
'UploadCompletePrinter',
]
_RESUMABLE_UPLOAD_THRESHOLD = 5 << 20
SIMPLE_UPLOAD = 'simple'
RESUMABLE_UPLOAD = 'resumable'
def DownloadProgressPrinter(response, unused_download):
"""Print download progress based on response."""
if 'content-range' in response.info:
print('Received %s' % response.info['content-range'])
else:
print('Received %d bytes' % response.length)
def DownloadCompletePrinter(unused_response, unused_download):
"""Print information about a completed download."""
print('Download complete')
def UploadProgressPrinter(response, unused_upload):
"""Print upload progress based on response."""
print('Sent %s' % response.info['range'])
def UploadCompletePrinter(unused_response, unused_upload):
"""Print information about a completed upload."""
print('Upload complete')
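# Usage sketch (hypothetical file name; the callbacks are the printers above):
#     download = Download.FromFile('out.bin', overwrite=True,
#                                  progress_callback=DownloadProgressPrinter,
#                                  finish_callback=DownloadCompletePrinter)
# Each completed chunk and the final response are then reported on stdout.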
class _Transfer(object):
"""Generic bits common to Uploads and Downloads."""
def __init__(self, stream, close_stream=False, chunksize=None,
auto_transfer=True, http=None, num_retries=5):
self.__bytes_http = None
self.__close_stream = close_stream
self.__http = http
self.__stream = stream
self.__url = None
self.__num_retries = 5
# Let the @property do validation
self.num_retries = num_retries
self.retry_func = (
http_wrapper.HandleExceptionsAndRebuildHttpConnections)
self.auto_transfer = auto_transfer
self.chunksize = chunksize or 1048576
def __repr__(self):
return str(self)
@property
def close_stream(self):
return self.__close_stream
@property
def http(self):
return self.__http
@property
def bytes_http(self):
return self.__bytes_http or self.http
@bytes_http.setter
def bytes_http(self, value):
self.__bytes_http = value
@property
def num_retries(self):
return self.__num_retries
@num_retries.setter
def num_retries(self, value):
util.Typecheck(value, six.integer_types)
if value < 0:
raise exceptions.InvalidDataError(
'Cannot have negative value for num_retries')
self.__num_retries = value
@property
def stream(self):
return self.__stream
@property
def url(self):
return self.__url
def _Initialize(self, http, url):
"""Initialize this download by setting self.http and self.url.
We want the user to be able to override self.http by having set
the value in the constructor; in that case, we ignore the provided
http.
Args:
http: An httplib2.Http instance or None.
url: The url for this transfer.
Returns:
None. Initializes self.
"""
self.EnsureUninitialized()
if self.http is None:
self.__http = http or http_wrapper.GetHttp()
self.__url = url
@property
def initialized(self):
return self.url is not None and self.http is not None
@property
def _type_name(self):
return type(self).__name__
def EnsureInitialized(self):
if not self.initialized:
raise exceptions.TransferInvalidError(
'Cannot use uninitialized %s' % self._type_name)
def EnsureUninitialized(self):
if self.initialized:
raise exceptions.TransferInvalidError(
'Cannot re-initialize %s' % self._type_name)
def __del__(self):
if self.__close_stream:
self.__stream.close()
def _ExecuteCallback(self, callback, response):
# TODO(craigcitro): Push these into a queue.
if callback is not None:
threading.Thread(target=callback, args=(response, self)).start()
class Download(_Transfer):
"""Data for a single download.
Public attributes:
chunksize: default chunksize to use for transfers.
"""
_ACCEPTABLE_STATUSES = set((
http_client.OK,
http_client.NO_CONTENT,
http_client.PARTIAL_CONTENT,
http_client.REQUESTED_RANGE_NOT_SATISFIABLE,
))
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'progress', 'total_size', 'url'))
def __init__(self, stream, progress_callback=None, finish_callback=None,
**kwds):
total_size = kwds.pop('total_size', None)
super(Download, self).__init__(stream, **kwds)
self.__initial_response = None
self.__progress = 0
self.__total_size = total_size
self.__encoding = None
self.progress_callback = progress_callback
self.finish_callback = finish_callback
@property
def progress(self):
return self.__progress
@property
def encoding(self):
return self.__encoding
@classmethod
def FromFile(cls, filename, overwrite=False, auto_transfer=True, **kwds):
"""Create a new download object from a filename."""
path = os.path.expanduser(filename)
if os.path.exists(path) and not overwrite:
raise exceptions.InvalidUserInputError(
'File %s exists and overwrite not specified' % path)
return cls(open(path, 'wb'), close_stream=True,
auto_transfer=auto_transfer, **kwds)
@classmethod
def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds):
"""Create a new Download object from a stream."""
return cls(stream, auto_transfer=auto_transfer, total_size=total_size,
**kwds)
@classmethod
def FromData(cls, stream, json_data, http=None, auto_transfer=None,
**kwds):
"""Create a new Download object from a stream and serialized data."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
download = cls.FromStream(stream, **kwds)
if auto_transfer is not None:
download.auto_transfer = auto_transfer
else:
download.auto_transfer = info['auto_transfer']
setattr(download, '_Download__progress', info['progress'])
setattr(download, '_Download__total_size', info['total_size'])
download._Initialize( # pylint: disable=protected-access
http, info['url'])
return download
@property
def serialization_data(self):
self.EnsureInitialized()
return {
'auto_transfer': self.auto_transfer,
'progress': self.progress,
'total_size': self.total_size,
'url': self.url,
}
@property
def total_size(self):
return self.__total_size
def __str__(self):
if not self.initialized:
return 'Download (uninitialized)'
return 'Download with %d/%s bytes transferred from url %s' % (
self.progress, self.total_size, self.url)
def ConfigureRequest(self, http_request, url_builder):
url_builder.query_params['alt'] = 'media'
# TODO(craigcitro): We need to send range requests because by
        # default httplib2 stores entire responses in memory. Override
# httplib2's download method (as gsutil does) so that this is not
# necessary.
http_request.headers['Range'] = 'bytes=0-%d' % (self.chunksize - 1,)
def __SetTotal(self, info):
"""Sets the total size based off info if possible otherwise 0."""
if 'content-range' in info:
_, _, total = info['content-range'].rpartition('/')
if total != '*':
self.__total_size = int(total)
# Note "total_size is None" means we don't know it; if no size
# info was returned on our initial range request, that means we
# have a 0-byte file. (That last statement has been verified
# empirically, but is not clearly documented anywhere.)
if self.total_size is None:
self.__total_size = 0
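    # For example (illustrative header values): a response with
    # 'Content-Range: bytes 0-999/101672' sets total_size to 101672, while
    # 'bytes 0-999/*' leaves the size unknown, and it is then treated as a
    # 0-byte download.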
def InitializeDownload(self, http_request, http=None, client=None):
"""Initialize this download by making a request.
Args:
http_request: The HttpRequest to use to initialize this download.
http: The httplib2.Http instance for this request.
client: If provided, let this client process the final URL before
sending any additional requests. If client is provided and
http is not, client.http will be used instead.
"""
self.EnsureUninitialized()
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
url = http_request.url
if self.auto_transfer:
end_byte = self.__ComputeEndByte(0)
self.__SetRangeHeader(http_request, 0, end_byte)
response = http_wrapper.MakeRequest(
self.bytes_http or http, http_request)
if response.status_code not in self._ACCEPTABLE_STATUSES:
raise exceptions.HttpError.FromResponse(response)
self.__initial_response = response
self.__SetTotal(response.info)
url = response.info.get('content-location', response.request_url)
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
self.StreamInChunks()
def __NormalizeStartEnd(self, start, end=None):
"""Normalizes start and end values based on total size."""
if end is not None:
if start < 0:
raise exceptions.TransferInvalidError(
'Cannot have end index with negative start index ' +
'[start=%d, end=%d]' % (start, end))
elif start >= self.total_size:
raise exceptions.TransferInvalidError(
'Cannot have start index greater than total size ' +
'[start=%d, total_size=%d]' % (start, self.total_size))
end = min(end, self.total_size - 1)
if end < start:
raise exceptions.TransferInvalidError(
'Range requested with end[%s] < start[%s]' % (end, start))
return start, end
else:
if start < 0:
start = max(0, start + self.total_size)
return start, self.total_size - 1
def __SetRangeHeader(self, request, start, end=None):
if start < 0:
request.headers['range'] = 'bytes=%d' % start
elif end is None or end < start:
request.headers['range'] = 'bytes=%d-' % start
else:
request.headers['range'] = 'bytes=%d-%d' % (start, end)
def __ComputeEndByte(self, start, end=None, use_chunks=True):
"""Compute the last byte to fetch for this request.
This is all based on the HTTP spec for Range and
Content-Range.
Note that this is potentially confusing in several ways:
* the value for the last byte is 0-based, eg "fetch 10 bytes
from the beginning" would return 9 here.
* if we have no information about size, and don't want to
use the chunksize, we'll return None.
See the tests for more examples.
Args:
start: byte to start at.
end: (int or None, default: None) Suggested last byte.
use_chunks: (bool, default: True) If False, ignore self.chunksize.
Returns:
Last byte to use in a Range header, or None.
"""
end_byte = end
if start < 0 and not self.total_size:
return end_byte
if use_chunks:
alternate = start + self.chunksize - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
if self.total_size:
alternate = self.total_size - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
return end_byte
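    # Illustrative values (assuming the default chunksize of 1048576 and an
    # unknown total_size): __ComputeEndByte(0) -> 1048575,
    # __ComputeEndByte(0, end=99) -> 99, and
    # __ComputeEndByte(0, use_chunks=False) -> None; once total_size is known
    # (say 100 bytes), the result is additionally capped at 99.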
def __GetChunk(self, start, end, additional_headers=None):
"""Retrieve a chunk, and return the full response."""
self.EnsureInitialized()
request = http_wrapper.Request(url=self.url)
self.__SetRangeHeader(request, start, end=end)
if additional_headers is not None:
request.headers.update(additional_headers)
return http_wrapper.MakeRequest(
self.bytes_http, request, retry_func=self.retry_func,
retries=self.num_retries)
def __ProcessResponse(self, response):
"""Process response (by updating self and writing to self.stream)."""
if response.status_code not in self._ACCEPTABLE_STATUSES:
# We distinguish errors that mean we made a mistake in setting
# up the transfer versus something we should attempt again.
if response.status_code in (http_client.FORBIDDEN,
http_client.NOT_FOUND):
raise exceptions.HttpError.FromResponse(response)
else:
raise exceptions.TransferRetryError(response.content)
if response.status_code in (http_client.OK,
http_client.PARTIAL_CONTENT):
try:
self.stream.write(six.ensure_binary(response.content))
except TypeError:
self.stream.write(six.ensure_text(response.content))
self.__progress += response.length
if response.info and 'content-encoding' in response.info:
# TODO(craigcitro): Handle the case where this changes over a
# download.
self.__encoding = response.info['content-encoding']
elif response.status_code == http_client.NO_CONTENT:
# It's important to write something to the stream for the case
# of a 0-byte download to a file, as otherwise python won't
# create the file.
self.stream.write('')
return response
def GetRange(self, start, end=None, additional_headers=None,
use_chunks=True):
"""Retrieve a given byte range from this download, inclusive.
Range must be of one of these three forms:
* 0 <= start, end = None: Fetch from start to the end of the file.
* 0 <= start <= end: Fetch the bytes from start to end.
* start < 0, end = None: Fetch the last -start bytes of the file.
(These variations correspond to those described in the HTTP 1.1
protocol for range headers in RFC 2616, sec. 14.35.1.)
Args:
start: (int) Where to start fetching bytes. (See above.)
end: (int, optional) Where to stop fetching bytes. (See above.)
          additional_headers: (dict, optional) Any additional headers to
pass with the request.
use_chunks: (bool, default: True) If False, ignore self.chunksize
and fetch this range in a single request.
Returns:
None. Streams bytes into self.stream.
"""
self.EnsureInitialized()
progress_end_normalized = False
if self.total_size is not None:
progress, end_byte = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
else:
progress = start
end_byte = end
while (not progress_end_normalized or end_byte is None or
progress <= end_byte):
end_byte = self.__ComputeEndByte(progress, end=end_byte,
use_chunks=use_chunks)
response = self.__GetChunk(progress, end_byte,
additional_headers=additional_headers)
if not progress_end_normalized:
self.__SetTotal(response.info)
progress, end_byte = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
response = self.__ProcessResponse(response)
progress += response.length
if response.length == 0:
if response.status_code == http_client.OK:
# There can legitimately be no Content-Length header sent
# in some cases (e.g., when there's a Transfer-Encoding
# header) and if this was a 200 response (as opposed to
# 206 Partial Content) we know we're done now without
# looping further on received length.
return
raise exceptions.TransferRetryError(
'Zero bytes unexpectedly returned in download response')
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Stream the entire download in chunks."""
self.StreamMedia(callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers,
use_chunks=True)
def StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None, use_chunks=True):
"""Stream the entire download.
Args:
callback: (default: None) Callback to call as each chunk is
completed.
finish_callback: (default: None) Callback to call when the
download is complete.
additional_headers: (default: None) Additional headers to
include in fetching bytes.
use_chunks: (bool, default: True) If False, ignore self.chunksize
and stream this download in a single request.
Returns:
None. Streams bytes into self.stream.
"""
callback = callback or self.progress_callback
finish_callback = finish_callback or self.finish_callback
self.EnsureInitialized()
while True:
if self.__initial_response is not None:
response = self.__initial_response
self.__initial_response = None
else:
end_byte = self.__ComputeEndByte(self.progress,
use_chunks=use_chunks)
response = self.__GetChunk(
self.progress, end_byte,
additional_headers=additional_headers)
if self.total_size is None:
self.__SetTotal(response.info)
response = self.__ProcessResponse(response)
self._ExecuteCallback(callback, response)
if (response.status_code == http_client.OK or
self.progress >= self.total_size):
break
self._ExecuteCallback(finish_callback, response)
if six.PY3:
class MultipartBytesGenerator(email_generator.BytesGenerator):
"""Generates a bytes Message object tree for multipart messages
This is a BytesGenerator that has been modified to not attempt line
termination character modification in the bytes payload. Known to
work with the compat32 policy only. It may work on others, but not
tested. The outfp object must accept bytes in its write method.
"""
def _handle_text(self, msg):
# If the string has surrogates the original source was bytes, so
# just write it back out.
if msg._payload is None:
return
self.write(msg._payload)
# Default body handler
_writeBody = _handle_text
class Upload(_Transfer):
"""Data for a single Upload.
Fields:
stream: The stream to upload.
mime_type: MIME type of the upload.
total_size: (optional) Total upload size for the stream.
close_stream: (default: False) Whether or not we should close the
stream when finished with the upload.
auto_transfer: (default: True) If True, stream all bytes as soon as
the upload is created.
"""
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'mime_type', 'total_size', 'url'))
def __init__(self, stream, mime_type, total_size=None, http=None,
close_stream=False, chunksize=None, auto_transfer=True,
progress_callback=None, finish_callback=None,
gzip_encoded=False, **kwds):
super(Upload, self).__init__(
stream, close_stream=close_stream, chunksize=chunksize,
auto_transfer=auto_transfer, http=http, **kwds)
self.__complete = False
self.__final_response = None
self.__mime_type = mime_type
self.__progress = 0
self.__server_chunk_granularity = None
self.__strategy = None
self.__total_size = None
self.__gzip_encoded = gzip_encoded
self.progress_callback = progress_callback
self.finish_callback = finish_callback
self.total_size = total_size
@property
def progress(self):
return self.__progress
@classmethod
def FromFile(cls, filename, mime_type=None, auto_transfer=True,
gzip_encoded=False, **kwds):
"""Create a new Upload object from a filename."""
path = os.path.expanduser(filename)
if not os.path.exists(path):
raise exceptions.NotFoundError('Could not find file %s' % path)
if not mime_type:
mime_type, _ = mimetypes.guess_type(path)
if mime_type is None:
raise exceptions.InvalidUserInputError(
'Could not guess mime type for %s' % path)
size = os.stat(path).st_size
return cls(open(path, 'rb'), mime_type, total_size=size,
close_stream=True, auto_transfer=auto_transfer,
gzip_encoded=gzip_encoded, **kwds)
@classmethod
def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True,
gzip_encoded=False, **kwds):
"""Create a new Upload object from a stream."""
if mime_type is None:
raise exceptions.InvalidUserInputError(
'No mime_type specified for stream')
return cls(stream, mime_type, total_size=total_size,
close_stream=False, auto_transfer=auto_transfer,
gzip_encoded=gzip_encoded, **kwds)
@classmethod
def FromData(cls, stream, json_data, http, auto_transfer=None,
gzip_encoded=False, **kwds):
"""Create a new Upload of stream from serialized json_data and http."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
if 'total_size' in kwds:
raise exceptions.InvalidUserInputError(
'Cannot override total_size on serialized Upload')
upload = cls.FromStream(stream, info['mime_type'],
total_size=info.get('total_size'),
gzip_encoded=gzip_encoded, **kwds)
if isinstance(stream, io.IOBase) and not stream.seekable():
raise exceptions.InvalidUserInputError(
'Cannot restart resumable upload on non-seekable stream')
if auto_transfer is not None:
upload.auto_transfer = auto_transfer
else:
upload.auto_transfer = info['auto_transfer']
upload.strategy = RESUMABLE_UPLOAD
upload._Initialize( # pylint: disable=protected-access
http, info['url'])
upload.RefreshResumableUploadState()
upload.EnsureInitialized()
if upload.auto_transfer:
upload.StreamInChunks()
return upload
@property
def serialization_data(self):
self.EnsureInitialized()
if self.strategy != RESUMABLE_UPLOAD:
raise exceptions.InvalidDataError(
'Serialization only supported for resumable uploads')
return {
'auto_transfer': self.auto_transfer,
'mime_type': self.mime_type,
'total_size': self.total_size,
'url': self.url,
}
@property
def complete(self):
return self.__complete
@property
def mime_type(self):
return self.__mime_type
def __str__(self):
if not self.initialized:
return 'Upload (uninitialized)'
return 'Upload with %d/%s bytes transferred for url %s' % (
self.progress, self.total_size or '???', self.url)
@property
def strategy(self):
return self.__strategy
@strategy.setter
def strategy(self, value):
if value not in (SIMPLE_UPLOAD, RESUMABLE_UPLOAD):
raise exceptions.UserError((
'Invalid value "%s" for upload strategy, must be one of '
'"simple" or "resumable".') % value)
self.__strategy = value
@property
def total_size(self):
return self.__total_size
@total_size.setter
def total_size(self, value):
self.EnsureUninitialized()
self.__total_size = value
def __SetDefaultUploadStrategy(self, upload_config, http_request):
"""Determine and set the default upload strategy for this upload.
We generally prefer simple or multipart, unless we're forced to
use resumable. This happens when any of (1) the upload is too
large, (2) the simple endpoint doesn't support multipart requests
and we have metadata, or (3) there is no simple upload endpoint.
Args:
upload_config: Configuration for the upload endpoint.
http_request: The associated http request.
Returns:
None.
"""
if upload_config.resumable_path is None:
self.strategy = SIMPLE_UPLOAD
if self.strategy is not None:
return
strategy = SIMPLE_UPLOAD
if (self.total_size is not None and
self.total_size > _RESUMABLE_UPLOAD_THRESHOLD):
strategy = RESUMABLE_UPLOAD
if http_request.body and not upload_config.simple_multipart:
strategy = RESUMABLE_UPLOAD
if not upload_config.simple_path:
strategy = RESUMABLE_UPLOAD
self.strategy = strategy
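    # For example (illustrative sizes): a 10 MiB stream (total_size = 10 << 20)
    # exceeds _RESUMABLE_UPLOAD_THRESHOLD and is therefore sent as
    # RESUMABLE_UPLOAD, while a small metadata-free upload against an endpoint
    # with a simple path stays SIMPLE_UPLOAD.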
def ConfigureRequest(self, upload_config, http_request, url_builder):
"""Configure the request and url for this upload."""
# Validate total_size vs. max_size
if (self.total_size and upload_config.max_size and
self.total_size > upload_config.max_size):
raise exceptions.InvalidUserInputError(
'Upload too big: %s larger than max size %s' % (
self.total_size, upload_config.max_size))
# Validate mime type
if not util.AcceptableMimeType(upload_config.accept, self.mime_type):
raise exceptions.InvalidUserInputError(
'MIME type %s does not match any accepted MIME ranges %s' % (
self.mime_type, upload_config.accept))
self.__SetDefaultUploadStrategy(upload_config, http_request)
if self.strategy == SIMPLE_UPLOAD:
url_builder.relative_path = upload_config.simple_path
if http_request.body:
url_builder.query_params['uploadType'] = 'multipart'
self.__ConfigureMultipartRequest(http_request)
else:
url_builder.query_params['uploadType'] = 'media'
self.__ConfigureMediaRequest(http_request)
# Once the entire body is written, compress the body if configured
# to. Both multipart and media request uploads will read the
# entire stream into memory, which means full compression is also
# safe to perform. Because the strategy is set to SIMPLE_UPLOAD,
# StreamInChunks throws an exception, meaning double compression
# cannot happen.
if self.__gzip_encoded:
http_request.headers['Content-Encoding'] = 'gzip'
# Turn the body into a stream so that we can compress it, then
# read the compressed bytes. In the event of a retry (e.g. if
# our access token has expired), we need to be able to re-read
# the body, which we can't do with a stream. So, we consume the
# bytes from the stream now and store them in a re-readable
# bytes container.
http_request.body = (
compression.CompressStream(
six.BytesIO(http_request.body))[0].read())
else:
url_builder.relative_path = upload_config.resumable_path
url_builder.query_params['uploadType'] = 'resumable'
self.__ConfigureResumableRequest(http_request)
def __ConfigureMediaRequest(self, http_request):
"""Configure http_request as a simple request for this upload."""
http_request.headers['content-type'] = self.mime_type
http_request.body = self.stream.read()
http_request.loggable_body = '<media body>'
def __ConfigureMultipartRequest(self, http_request):
"""Configure http_request as a multipart request for this upload."""
# This is a multipart/related upload.
msg_root = mime_multipart.MIMEMultipart('related')
# msg_root should not write out its own headers
setattr(msg_root, '_write_headers', lambda self: None)
# attach the body as one part
msg = mime_nonmultipart.MIMENonMultipart(
*http_request.headers['content-type'].split('/'))
msg.set_payload(http_request.body)
msg_root.attach(msg)
# attach the media as the second part
msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/'))
msg['Content-Transfer-Encoding'] = 'binary'
msg.set_payload(self.stream.read())
msg_root.attach(msg)
# NOTE: We encode the body, but can't use
# `email.message.Message.as_string` because it prepends
# `> ` to `From ` lines.
fp = six.BytesIO()
if six.PY3:
generator_class = MultipartBytesGenerator
else:
generator_class = email_generator.Generator
g = generator_class(fp, mangle_from_=False)
g.flatten(msg_root, unixfrom=False)
http_request.body = fp.getvalue()
multipart_boundary = msg_root.get_boundary()
http_request.headers['content-type'] = (
'multipart/related; boundary=%r' % multipart_boundary)
if isinstance(multipart_boundary, six.text_type):
multipart_boundary = multipart_boundary.encode('ascii')
body_components = http_request.body.split(multipart_boundary)
headers, _, _ = body_components[-2].partition(b'\n\n')
body_components[-2] = b'\n\n'.join([headers, b'<media body>\n\n--'])
http_request.loggable_body = multipart_boundary.join(body_components)
def __ConfigureResumableRequest(self, http_request):
http_request.headers['X-Upload-Content-Type'] = self.mime_type
if self.total_size is not None:
http_request.headers[
'X-Upload-Content-Length'] = str(self.total_size)
def RefreshResumableUploadState(self):
"""Talk to the server and refresh the state of this resumable upload.
Returns:
Response if the upload is complete.
"""
if self.strategy != RESUMABLE_UPLOAD:
return
self.EnsureInitialized()
refresh_request = http_wrapper.Request(
url=self.url, http_method='PUT',
headers={'Content-Range': 'bytes */*'})
refresh_response = http_wrapper.MakeRequest(
self.http, refresh_request, redirections=0,
retries=self.num_retries)
range_header = self._GetRangeHeaderFromResponse(refresh_response)
if refresh_response.status_code in (http_client.OK,
http_client.CREATED):
self.__complete = True
self.__progress = self.total_size
self.stream.seek(self.progress)
# If we're finished, the refresh response will contain the metadata
# originally requested. Cache it so it can be returned in
# StreamInChunks.
self.__final_response = refresh_response
elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE:
if range_header is None:
self.__progress = 0
else:
self.__progress = self.__GetLastByte(range_header) + 1
self.stream.seek(self.progress)
else:
raise exceptions.HttpError.FromResponse(refresh_response)
def _GetRangeHeaderFromResponse(self, response):
return response.info.get('Range', response.info.get('range'))
def InitializeUpload(self, http_request, http=None, client=None):
"""Initialize this upload from the given http_request."""
if self.strategy is None:
raise exceptions.UserError(
'No upload strategy set; did you call ConfigureRequest?')
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
if self.strategy != RESUMABLE_UPLOAD:
return
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
self.EnsureUninitialized()
http_response = http_wrapper.MakeRequest(http, http_request,
retries=self.num_retries)
if http_response.status_code != http_client.OK:
raise exceptions.HttpError.FromResponse(http_response)
self.__server_chunk_granularity = http_response.info.get(
'X-Goog-Upload-Chunk-Granularity')
url = http_response.info['location']
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
return self.StreamInChunks()
return http_response
def __GetLastByte(self, range_header):
_, _, end = range_header.partition('-')
# TODO(craigcitro): Validate start == 0?
return int(end)
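    # e.g. a 'Range: bytes=0-12345' response header yields 12345 here
    # (illustrative value).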
def __ValidateChunksize(self, chunksize=None):
if self.__server_chunk_granularity is None:
return
chunksize = chunksize or self.chunksize
if chunksize % self.__server_chunk_granularity:
raise exceptions.ConfigurationValueError(
'Server requires chunksize to be a multiple of %d' %
self.__server_chunk_granularity)
def __IsRetryable(self, response):
return (response.status_code >= 500 or
response.status_code == http_wrapper.TOO_MANY_REQUESTS or
response.retry_after)
def __StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None, use_chunks=True):
"""Helper function for StreamMedia / StreamInChunks."""
if self.strategy != RESUMABLE_UPLOAD:
raise exceptions.InvalidUserInputError(
'Cannot stream non-resumable upload')
callback = callback or self.progress_callback
finish_callback = finish_callback or self.finish_callback
# final_response is set if we resumed an already-completed upload.
response = self.__final_response
def CallSendChunk(start):
return self.__SendChunk(
start, additional_headers=additional_headers)
def CallSendMediaBody(start):
return self.__SendMediaBody(
start, additional_headers=additional_headers)
send_func = CallSendChunk if use_chunks else CallSendMediaBody
if not use_chunks and self.__gzip_encoded:
raise exceptions.InvalidUserInputError(
'Cannot gzip encode non-chunked upload')
if use_chunks:
self.__ValidateChunksize(self.chunksize)
self.EnsureInitialized()
while not self.complete:
response = send_func(self.stream.tell())
if response.status_code in (http_client.OK, http_client.CREATED):
self.__complete = True
break
if response.status_code not in (
http_client.OK, http_client.CREATED,
http_wrapper.RESUME_INCOMPLETE):
# Only raise an exception if the error is something we can't
# recover from.
if (self.strategy != RESUMABLE_UPLOAD or
not self.__IsRetryable(response)):
raise exceptions.HttpError.FromResponse(response)
# We want to reset our state to wherever the server left us
                # before this failed request, and then retry.
self.RefreshResumableUploadState()
self._ExecuteCallback(callback, response)
continue
self.__progress = self.__GetLastByte(
self._GetRangeHeaderFromResponse(response))
if self.progress + 1 != self.stream.tell():
# TODO(craigcitro): Add a better way to recover here.
raise exceptions.CommunicationError(
'Failed to transfer all bytes in chunk, upload paused at '
'byte %d' % self.progress)
self._ExecuteCallback(callback, response)
if self.__complete and hasattr(self.stream, 'seek'):
current_pos = self.stream.tell()
self.stream.seek(0, os.SEEK_END)
end_pos = self.stream.tell()
self.stream.seek(current_pos)
if current_pos != end_pos:
raise exceptions.TransferInvalidError(
'Upload complete with %s additional bytes left in stream' %
(int(end_pos) - int(current_pos)))
self._ExecuteCallback(finish_callback, response)
return response
def StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None):
"""Send this resumable upload in a single request.
Args:
callback: Progress callback function with inputs
(http_wrapper.Response, transfer.Upload)
finish_callback: Final callback function with inputs
(http_wrapper.Response, transfer.Upload)
additional_headers: Dict of headers to include with the upload
http_wrapper.Request.
Returns:
http_wrapper.Response of final response.
"""
return self.__StreamMedia(
callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers, use_chunks=False)
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Send this (resumable) upload in chunks."""
return self.__StreamMedia(
callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers)
def __SendMediaRequest(self, request, end):
"""Request helper function for SendMediaBody & SendChunk."""
def CheckResponse(response):
if response is None:
# Caller shouldn't call us if the response is None,
# but handle anyway.
raise exceptions.RequestError(
'Request to url %s did not return a response.' %
request.url)  # response is None here, so report the request's URL.
response = http_wrapper.MakeRequest(
self.bytes_http, request, retry_func=self.retry_func,
retries=self.num_retries, check_response_func=CheckResponse)
if response.status_code == http_wrapper.RESUME_INCOMPLETE:
last_byte = self.__GetLastByte(
self._GetRangeHeaderFromResponse(response))
if last_byte + 1 != end:
self.stream.seek(last_byte + 1)
return response
def __SendMediaBody(self, start, additional_headers=None):
"""Send the entire media stream in a single request."""
self.EnsureInitialized()
if self.total_size is None:
raise exceptions.TransferInvalidError(
'Total size must be known for SendMediaBody')
body_stream = stream_slice.StreamSlice(
self.stream, self.total_size - start)
request = http_wrapper.Request(url=self.url, http_method='PUT',
body=body_stream)
request.headers['Content-Type'] = self.mime_type
if start == self.total_size:
# End of an upload with 0 bytes left to send; just finalize.
range_string = 'bytes */%s' % self.total_size
else:
range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1,
self.total_size)
request.headers['Content-Range'] = range_string
if additional_headers:
request.headers.update(additional_headers)
return self.__SendMediaRequest(request, self.total_size)
def __SendChunk(self, start, additional_headers=None):
"""Send the specified chunk."""
self.EnsureInitialized()
no_log_body = self.total_size is None
request = http_wrapper.Request(url=self.url, http_method='PUT')
if self.__gzip_encoded:
request.headers['Content-Encoding'] = 'gzip'
body_stream, read_length, exhausted = compression.CompressStream(
self.stream, self.chunksize)
end = start + read_length
# If the stream length was previously unknown and the input stream
# is exhausted, then we're at the end of the stream.
if self.total_size is None and exhausted:
self.__total_size = end
elif self.total_size is None:
# For the streaming resumable case, we need to detect when
# we're at the end of the stream.
body_stream = buffered_stream.BufferedStream(
self.stream, start, self.chunksize)
end = body_stream.stream_end_position
if body_stream.stream_exhausted:
self.__total_size = end
# TODO: Here, change body_stream from a stream to a string object,
# which means reading a chunk into memory. This works around
# https://code.google.com/p/httplib2/issues/detail?id=176 which can
# cause httplib2 to skip bytes on 401's for file objects.
# Rework this solution to be more general.
body_stream = body_stream.read(self.chunksize)
else:
end = min(start + self.chunksize, self.total_size)
body_stream = stream_slice.StreamSlice(self.stream, end - start)
# TODO(craigcitro): Think about clearer errors on "no data in
# stream".
request.body = body_stream
request.headers['Content-Type'] = self.mime_type
if no_log_body:
# Disable logging of streaming body.
# TODO: Remove no_log_body and rework as part of a larger logs
# refactor.
request.loggable_body = '<media body>'
if self.total_size is None:
# Streaming resumable upload case, unknown total size.
range_string = 'bytes %s-%s/*' % (start, end - 1)
elif end == start:
# End of an upload with 0 bytes left to send; just finalize.
range_string = 'bytes */%s' % self.total_size
else:
# Normal resumable upload case with known sizes.
range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size)
request.headers['Content-Range'] = range_string
if additional_headers:
request.headers.update(additional_headers)
return self.__SendMediaRequest(request, end)
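# Illustrative helper (an assumption, not part of apitools): the three
# Content-Range forms built by __SendChunk above, collected in one place.
def _example_content_range(start, end, total_size):
    """Mirror of the Content-Range logic used for resumable chunks."""
    if total_size is None:
        # Streaming resumable upload with unknown total size.
        return 'bytes %s-%s/*' % (start, end - 1)
    if end == start:
        # Nothing left to send; this request just finalizes the upload.
        return 'bytes */%s' % total_size
    # Normal resumable upload case with known sizes.
    return 'bytes %s-%s/%s' % (start, end - 1, total_size)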
|
|
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Signal processing utility module.
"""
import array
import logging
import os
import sys
try:
import numpy as np
except ImportError:
logging.critical('Cannot import the third-party Python package numpy')
sys.exit(1)
try:
import pydub
import pydub.generators
except ImportError:
logging.critical('Cannot import the third-party Python package pydub')
sys.exit(1)
try:
import scipy.signal
except ImportError:
logging.critical('Cannot import the third-party Python package scipy')
sys.exit(1)
from . import exceptions
class SignalProcessingUtils(object):
"""Collection of signal processing utilities.
"""
def __init__(self):
pass
@classmethod
def LoadWav(cls, filepath, channels=1):
"""Loads wav file.
Args:
filepath: path to the wav audio track file to load.
channels: number of channels (downmixing to mono by default).
Returns:
AudioSegment instance.
"""
if not os.path.exists(filepath):
logging.error('cannot find the <%s> audio track file', filepath)
raise exceptions.FileNotFoundError()
return pydub.AudioSegment.from_file(
filepath, format='wav', channels=channels)
@classmethod
def SaveWav(cls, output_filepath, signal):
"""Saves wav file.
Args:
output_filepath: path to the wav audio track file to save.
signal: AudioSegment instance.
"""
return signal.export(output_filepath, format='wav')
@classmethod
def CountSamples(cls, signal):
"""Number of samples per channel.
Args:
signal: AudioSegment instance.
Returns:
An integer.
"""
number_of_samples = len(signal.get_array_of_samples())
assert signal.channels > 0
assert number_of_samples % signal.channels == 0
# Floor division returns an integer, as documented; the assert above
# guarantees the division is exact.
return number_of_samples // signal.channels
@classmethod
def GenerateWhiteNoise(cls, signal):
"""Generates white noise.
White noise is generated with the same duration and in the same format as a
given signal.
Args:
signal: AudioSegment instance.
Returns:
AudioSegment instance.
"""
generator = pydub.generators.WhiteNoise(
sample_rate=signal.frame_rate,
bit_depth=signal.sample_width * 8)
return generator.to_audio_segment(
duration=len(signal),
volume=0.0)
@classmethod
def ApplyImpulseResponse(cls, signal, impulse_response):
"""Applies an impulse response to a signal.
Args:
signal: AudioSegment instance.
impulse_response: list or numpy vector of float values.
Returns:
AudioSegment instance.
"""
# Get samples.
assert signal.channels == 1, (
'multiple-channel recordings not supported')
samples = signal.get_array_of_samples()
# Convolve.
logging.info('applying %d order impulse response to a signal lasting %d ms',
len(impulse_response), len(signal))
convolved_samples = scipy.signal.fftconvolve(
in1=samples,
in2=impulse_response,
mode='full').astype(np.int16)
logging.info('convolution computed')
# Cast.
convolved_samples = array.array(signal.array_type, convolved_samples)
# Verify.
logging.debug('signal length: %d samples', len(samples))
logging.debug('convolved signal length: %d samples', len(convolved_samples))
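# 'full'-mode convolution of N input samples with an M-tap impulse response
# yields N + M - 1 output samples, so the convolved signal is strictly longer
# whenever the impulse response has more than one tap (checked below).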
assert len(convolved_samples) > len(samples)
# Generate convolved signal AudioSegment instance.
convolved_signal = pydub.AudioSegment(
data=convolved_samples,
metadata={
'sample_width': signal.sample_width,
'frame_rate': signal.frame_rate,
'frame_width': signal.frame_width,
'channels': signal.channels,
})
assert len(convolved_signal) > len(signal)
return convolved_signal
@classmethod
def Normalize(cls, signal):
"""Normalizes a signal.
Args:
signal: AudioSegment instance.
Returns:
An AudioSegment instance.
"""
return signal.apply_gain(-signal.max_dBFS)
@classmethod
def Copy(cls, signal):
"""Makes a copy os a signal.
Args:
signal: AudioSegment instance.
Returns:
An AudioSegment instance.
"""
return pydub.AudioSegment(
data=signal.get_array_of_samples(),
metadata={
'sample_width': signal.sample_width,
'frame_rate': signal.frame_rate,
'frame_width': signal.frame_width,
'channels': signal.channels,
})
@classmethod
def MixSignals(cls, signal, noise, target_snr=0.0, bln_pad_shortest=False):
"""Mixes two signals with a target SNR.
Mix two signals with a desired SNR by scaling the noise signal.
If the target SNR is +/- infinite, a copy of signal/noise is returned.
Args:
signal: AudioSegment instance (signal).
noise: AudioSegment instance (noise).
target_snr: float, numpy.Inf or -numpy.Inf (dB).
bln_pad_shortest: if True, it pads the shortest signal with silence at the
end.
Returns:
An AudioSegment instance.
"""
# Handle infinite target SNR.
if target_snr == -np.Inf:
# Return a copy of noise.
logging.warning('SNR = -Inf, returning noise')
return cls.Copy(noise)
elif target_snr == np.Inf:
# Return a copy of signal.
logging.warning('SNR = +Inf, returning signal')
return cls.Copy(signal)
# Check signal and noise power.
signal_power = float(signal.dBFS)
noise_power = float(noise.dBFS)
if signal_power == -np.Inf:
logging.error('signal has -Inf power, cannot mix')
raise exceptions.SignalProcessingException(
'cannot mix a signal with -Inf power')
if noise_power == -np.Inf:
logging.error('noise has -Inf power, cannot mix')
raise exceptions.SignalProcessingException(
'cannot mix noise with -Inf power')
# Pad signal (if necessary). If noise is the shortest, the AudioSegment
# overlay() method implicitly pads noise. Hence, the only case to handle
# is signal shorter than noise and bln_pad_shortest True.
if bln_pad_shortest:
signal_duration = len(signal)
noise_duration = len(noise)
logging.warning('mix signals with padding')
logging.warning(' signal: %d ms', signal_duration)
logging.warning(' noise: %d ms', noise_duration)
padding_duration = noise_duration - signal_duration
if padding_duration > 0: # That is signal_duration < noise_duration.
logging.debug(' padding: %d ms', padding_duration)
padding = pydub.AudioSegment.silent(
duration=padding_duration,
frame_rate=signal.frame_rate)
logging.debug(' signal (pre): %d ms', len(signal))
signal = signal + padding
logging.debug(' signal (post): %d ms', len(signal))
# Update power.
signal_power = float(signal.dBFS)
# Mix signals using the target SNR.
gain_db = signal_power - noise_power - target_snr
return cls.Normalize(signal.overlay(noise.apply_gain(gain_db)))
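# Minimal usage sketch (an assumption, not part of the original module): mix a
# synthetic 1 kHz tone with matching white noise 10 dB below it using the
# utilities above. With the tone at, say, -20 dBFS and the noise at -30 dBFS,
# MixSignals applies a gain of -20 - (-30) - 10 = 0 dB to the noise.
if __name__ == '__main__':
    _tone = pydub.generators.Sine(
        1000, sample_rate=48000, bit_depth=16).to_audio_segment(duration=1000)
    _noise = SignalProcessingUtils.GenerateWhiteNoise(_tone)
    _mix = SignalProcessingUtils.MixSignals(_tone, _noise, target_snr=10.0)
    logging.info('mixed signal: %d ms, %d samples per channel',
                 len(_mix), SignalProcessingUtils.CountSamples(_mix))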
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnGatewaysOperations(object):
"""VpnGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnGateway"
"""Retrieves the details of a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.VpnGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
vpn_gateway_parameters, # type: "_models.VpnGateway"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnGateway"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
gateway_name, # type: str
vpn_gateway_parameters, # type: "_models.VpnGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnGateway"]
"""Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan vpn
gateway.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2018_11_01.models.VpnGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_11_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
vpn_gateway_parameters=vpn_gateway_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
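# Illustrative call pattern (an assumption, not generated code): with a
# NetworkManagementClient configured for api-version 2018-11-01, a caller
# would typically run
#     poller = client.vpn_gateways.begin_create_or_update(
#         resource_group_name, gateway_name, vpn_gateway_parameters)
#     gateway = poller.result()  # blocks until the LRO reaches a terminal state
# letting ARMPolling drive the polling loop described above.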
def _update_tags_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
vpn_gateway_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnGateway"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
gateway_name, # type: str
vpn_gateway_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnGateway"]
"""Updates virtual wan vpn gateway tags.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2018_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_11_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
vpn_gateway_parameters=vpn_gateway_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnGatewaysResult"]
"""Lists all the VpnGateways in a resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_11_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnGatewaysResult"]
"""Lists all the VpnGateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_11_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'} # type: ignore
|
|
# -*- coding: utf-8 -*-
"""
sockjs.tornado.router
~~~~~~~~~~~~~~~~~~~~~
SockJS protocol router implementation.
"""
from tornado import ioloop, version_info
from sockjs.tornado import transports, session, sessioncontainer, static, stats, proto
DEFAULT_SETTINGS = {
# Sessions check interval in seconds
'session_check_interval': 1,
# Session expiration in seconds
'disconnect_delay': 5,
# Heartbeat time in seconds. Do not change this value unless
# you are absolutely sure that the new value will work.
'heartbeat_delay': 25,
# Transports to disable (all transports are enabled by default)
'disabled_transports': [],
# SockJS location
'sockjs_url': 'https://cdn.jsdelivr.net/sockjs/0.3/sockjs.min.js',
# Max response body size
'response_limit': 128 * 1024,
# Enable or disable JSESSIONID cookie handling
'jsessionid': True,
# Should sockjs-tornado flush messages immediately or queue them and
# flush on next ioloop tick
'immediate_flush': True,
# Enable or disable Nagle for persistent transports
'disable_nagle': True,
# Enable IP checks for polling transports. If enabled, all subsequent
# polling calls should be from the same IP address.
'verify_ip': True,
# list of allowed origins for websocket connections
# or "*" - accept all websocket connections
'websocket_allow_origin': "*"
}
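# Illustrative override (an assumption, not part of this module): applications
# usually pass only the keys they want to change, e.g.
#     SockJSRouter(ChatConnection, '/chat',
#                  user_settings={'disconnect_delay': 10,
#                                 'disabled_transports': ['jsonp', 'htmlfile']})
# where ChatConnection is a hypothetical SockJSConnection subclass; anything
# not overridden falls back to DEFAULT_SETTINGS above.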
GLOBAL_HANDLERS = [
('xhr_send', transports.XhrSendHandler),
('jsonp_send', transports.JSONPSendHandler)
]
TRANSPORTS = {
'websocket': transports.WebSocketTransport,
'xhr': transports.XhrPollingTransport,
'xhr_streaming': transports.XhrStreamingTransport,
'jsonp': transports.JSONPTransport,
'eventsource': transports.EventSourceTransport,
'htmlfile': transports.HtmlFileTransport
}
STATIC_HANDLERS = {
'/chunking_test': static.ChunkingTestHandler,
'/info': static.InfoHandler,
'/iframe[0-9-.a-z_]*.html': static.IFrameHandler,
'/websocket': transports.RawWebSocketTransport,
'/?': static.GreetingsHandler
}
class SockJSRouter(object):
"""SockJS protocol router"""
def __init__(self,
connection,
prefix='',
user_settings=dict(),
io_loop=None,
session_kls=None):
"""Constructor.
`connection`
SockJSConnection class
`prefix`
Connection prefix
`user_settings`
Settings dictionary
`io_loop`
Optional IOLoop instance
"""
# TODO: Version check
if version_info[0] < 2:
raise Exception('sockjs-tornado requires Tornado 2.0 or higher.')
# Store connection class
self._connection = connection
# Initialize io_loop
self.io_loop = io_loop or ioloop.IOLoop.instance()
# Settings
self.settings = DEFAULT_SETTINGS.copy()
if user_settings:
self.settings.update(user_settings)
self.websockets_enabled = 'websocket' not in self.settings['disabled_transports']
self.cookie_needed = self.settings['jsessionid']
# Sessions
self._session_kls = session_kls if session_kls else session.Session
self._sessions = sessioncontainer.SessionContainer()
check_interval = self.settings['session_check_interval'] * 1000
self._sessions_cleanup = ioloop.PeriodicCallback(self._sessions.expire,
check_interval)
self._sessions_cleanup.start()
# Stats
self.stats = stats.StatsCollector(self.io_loop)
# Initialize URLs
base = prefix + r'/[^/.]+/(?P<session_id>[^/.]+)'
# Generate global handler URLs
self._transport_urls = [('%s/%s$' % (base, p[0]), p[1], dict(server=self))
for p in GLOBAL_HANDLERS]
for k, v in TRANSPORTS.items():
if k in self.settings['disabled_transports']:
continue
# Only version 1 is supported
self._transport_urls.append(
(r'%s/%s$' % (base, k),
v,
dict(server=self))
)
# Generate static URLs
self._transport_urls.extend([('%s%s' % (prefix, k), v, dict(server=self))
for k, v in STATIC_HANDLERS.items()])
@property
def urls(self):
"""List of the URLs to be added to the Tornado application"""
return self._transport_urls
def apply_routes(self, routes):
"""Feed list of the URLs to the routes list. Returns list"""
routes.extend(self._transport_urls)
return routes
def create_session(self, session_id, register=True):
"""Creates new session object and returns it.
`session_id`
Session id for the new session
`register`
Should the session be registered in a storage. Websockets don't
need it.
"""
# TODO: Possible optimization here for settings.get
s = self._session_kls(self._connection,
self,
session_id,
self.settings.get('disconnect_delay')
)
if register:
self._sessions.add(s)
return s
def get_session(self, session_id):
"""Get session by session id
`session_id`
Session id
"""
return self._sessions.get(session_id)
def get_connection_class(self):
"""Return associated connection class"""
return self._connection
# Broadcast helper
def broadcast(self, clients, msg):
"""Optimized `broadcast` implementation. Depending on type of the session, will json-encode
message once and will call either `send_message` or `send_jsonifed`.
`clients`
Clients iterable
`msg`
Message to send
"""
json_msg = None
count = 0
for c in clients:
sess = c.session
if not sess.is_closed:
if sess.send_expects_json:
if json_msg is None:
json_msg = proto.json_encode(msg)
sess.send_jsonified(json_msg, False)
else:
sess.send_message(msg, stats=False)
count += 1
self.stats.on_pack_sent(count)
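# Usage sketch (hypothetical names, not part of sockjs-tornado): inside a
# SockJSConnection subclass the room-wide send is usually written as
#     self.broadcast(self.participants, message)
# which proxies to SockJSRouter.broadcast above, so proto.json_encode runs at
# most once per call no matter how many clients are in `participants`.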
|
|
"""
Support to interface with Alexa Devices.
For more details about this platform, please refer to the documentation at
https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers-needed/58639
VERSION 0.9.5
"""
import logging
from datetime import timedelta
import requests
import voluptuous as vol
from homeassistant import util
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE,
SUPPORT_PLAY_MEDIA, SUPPORT_VOLUME_SET,
MediaPlayerDevice, DOMAIN, MEDIA_PLAYER_SCHEMA,
SUPPORT_SELECT_SOURCE)
from homeassistant.const import (
CONF_EMAIL, CONF_PASSWORD, CONF_URL,
STATE_IDLE, STATE_STANDBY, STATE_PAUSED,
STATE_PLAYING)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.service import extract_entity_ids
from homeassistant.helpers.event import track_utc_time_change
# from homeassistant.util.json import load_json, save_json
# from homeassistant.util import dt as dt_util
SUPPORT_ALEXA = (SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK |
SUPPORT_NEXT_TRACK | SUPPORT_STOP |
SUPPORT_VOLUME_SET | SUPPORT_PLAY |
SUPPORT_PLAY_MEDIA | SUPPORT_TURN_OFF |
SUPPORT_VOLUME_MUTE | SUPPORT_PAUSE |
SUPPORT_SELECT_SOURCE)
_CONFIGURING = []
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['beautifulsoup4==4.6.0', 'simplejson==3.16.0']
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=15)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
ALEXA_DATA = "alexa_media"
SERVICE_ALEXA_TTS = 'alexa_tts'
ATTR_MESSAGE = 'message'
ALEXA_TTS_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MESSAGE): cv.string,
})
CONF_DEBUG = 'debug'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_DEBUG, default=False): cv.boolean,
})
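# Illustrative configuration.yaml entry matching the schema above (values are
# placeholders; the platform key is an assumption based on this component's
# documentation):
#
#   media_player:
#     - platform: alexa
#       email: my_email@example.com
#       password: !secret alexa_password
#       url: amazon.com
#       debug: false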
def request_configuration(hass, config, setup_platform_callback,
status=None):
"""Request configuration steps from the user."""
configurator = hass.components.configurator
async def configuration_callback(callback_data):
"""Handle the submitted configuration."""
hass.async_add_job(setup_platform_callback, callback_data)
# Get Captcha
if (status and 'captcha_image_url' in status and
status['captcha_image_url'] is not None):
config_id = configurator.request_config(
"Alexa Media Player - Captcha", configuration_callback,
description=('Please enter the text for the captcha.'
' Please enter anything if the image is missing.'
),
description_image=status['captcha_image_url'],
submit_caption="Confirm",
fields=[{'id': 'captcha', 'name': 'Captcha'}]
)
elif (status and 'securitycode_required' in status and
status['securitycode_required']): # Get 2FA code
config_id = configurator.request_config(
"Alexa Media Player - 2FA", configuration_callback,
description=('Please enter your Two-Factor Security code.'),
submit_caption="Confirm",
fields=[{'id': 'securitycode', 'name': 'Security Code'}]
)
elif (status and 'claimspicker_required' in status and
status['claimspicker_required']): # Get picker method
options = status['claimspicker_message']
config_id = configurator.request_config(
"Alexa Media Player - Verification Method", configuration_callback,
description=('Please select the verification method. '
'(e.g., sms or email).<br />{}').format(
options
),
submit_caption="Confirm",
fields=[{'id': 'claimsoption', 'name': 'Option'}]
)
elif (status and 'verificationcode_required' in status and
status['verificationcode_required']): # Get picker method
config_id = configurator.request_config(
"Alexa Media Player - Verification Code", configuration_callback,
description=('Please enter the received verification code.'),
submit_caption="Confirm",
fields=[{'id': 'verificationcode', 'name': 'Verification Code'}]
)
else: # Check login
config_id = configurator.request_config(
"Alexa Media Player - Begin", configuration_callback,
description=('Please hit confirm to begin login attempt.'),
submit_caption="Confirm",
fields=[]
)
_CONFIGURING.append(config_id)
if (_CONFIGURING and status and 'error_message' in status
        and status['error_message']):
configurator.notify_errors( # use sync to delay next pop
_CONFIGURING[len(_CONFIGURING)-1], status['error_message'])
if (len(_CONFIGURING) > 1):
configurator.async_request_done(_CONFIGURING.pop(0))
def setup_platform(hass, config, add_devices_callback,
discovery_info=None):
"""Set up the Alexa platform."""
if ALEXA_DATA not in hass.data:
hass.data[ALEXA_DATA] = {}
email = config.get(CONF_EMAIL)
password = config.get(CONF_PASSWORD)
url = config.get(CONF_URL)
login = AlexaLogin(url, email, password, hass.config.path,
config.get(CONF_DEBUG))
async def setup_platform_callback(callback_data):
_LOGGER.debug(("Status: {} got captcha: {} securitycode: {}"
" Claimsoption: {} VerificationCode: {}").format(
login.status,
callback_data.get('captcha'),
callback_data.get('securitycode'),
callback_data.get('claimsoption'),
callback_data.get('verificationcode')))
login.login(captcha=callback_data.get('captcha'),
securitycode=callback_data.get('securitycode'),
claimsoption=callback_data.get('claimsoption'),
verificationcode=callback_data.get('verificationcode'))
testLoginStatus(hass, config, add_devices_callback, login,
setup_platform_callback)
testLoginStatus(hass, config, add_devices_callback, login,
setup_platform_callback)
def testLoginStatus(hass, config, add_devices_callback, login,
setup_platform_callback):
"""Test the login status."""
if 'login_successful' in login.status and login.status['login_successful']:
_LOGGER.debug("Setting up Alexa devices")
hass.async_add_job(setup_alexa, hass, config,
add_devices_callback, login)
return
elif ('captcha_required' in login.status and
login.status['captcha_required']):
_LOGGER.debug("Creating configurator to request captcha")
elif ('securitycode_required' in login.status and
login.status['securitycode_required']):
_LOGGER.debug("Creating configurator to request 2FA")
elif ('claimspicker_required' in login.status and
login.status['claimspicker_required']):
_LOGGER.debug("Creating configurator to select verification option")
elif ('verificationcode_required' in login.status and
login.status['verificationcode_required']):
_LOGGER.debug("Creating configurator to enter verification code")
elif ('login_failed' in login.status and
login.status['login_failed']):
_LOGGER.debug("Creating configurator to start new login attempt")
hass.async_add_job(request_configuration, hass, config,
setup_platform_callback,
login.status)
def setup_alexa(hass, config, add_devices_callback, login_obj):
"""Set up a alexa api based on host parameter."""
alexa_clients = hass.data[ALEXA_DATA]
# alexa_sessions = {}
track_utc_time_change(hass, lambda now: update_devices(), second=30)
url = config.get(CONF_URL)
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update_devices():
"""Update the devices objects."""
devices = AlexaAPI.get_devices(url, login_obj._session)
bluetooth = AlexaAPI.get_bluetooth(url, login_obj._session)
if ((devices is None or bluetooth is None)
and len(_CONFIGURING) == 0):
_LOGGER.debug("Alexa API disconnected; attempting to relogin")
login_obj.login_with_cookie()
new_alexa_clients = []
available_client_ids = []
for device in devices:
for b_state in bluetooth['bluetoothStates']:
if device['serialNumber'] == b_state['deviceSerialNumber']:
device['bluetooth_state'] = b_state
available_client_ids.append(device['serialNumber'])
if device['serialNumber'] not in alexa_clients:
new_client = AlexaClient(config, login_obj._session, device,
update_devices, url)
alexa_clients[device['serialNumber']] = new_client
new_alexa_clients.append(new_client)
elif device['online']:
alexa_clients[device['serialNumber']].refresh(device)
if new_alexa_clients:
def tts_handler(call):
for alexa in service_to_entities(call):
if call.service == SERVICE_ALEXA_TTS:
message = call.data.get(ATTR_MESSAGE)
alexa.send_tts(message)
def service_to_entities(call):
"""Return the known devices that a service call mentions."""
entity_ids = extract_entity_ids(hass, call)
if entity_ids:
entities = [entity for entity in new_alexa_clients
if entity.entity_id in entity_ids]
else:
entities = None
return entities
hass.services.register(DOMAIN, SERVICE_ALEXA_TTS, tts_handler,
schema=ALEXA_TTS_SCHEMA)
add_devices_callback(new_alexa_clients)
update_devices()
# Clear configurator. We delay until here to avoid leaving an orphaned modal.
global _CONFIGURING
for config_id in _CONFIGURING:
configurator = hass.components.configurator
configurator.async_request_done(config_id)
_CONFIGURING = []
class AlexaClient(MediaPlayerDevice):
"""Representation of a Alexa device."""
def __init__(self, config, session, device, update_devices, url):
"""Initialize the Alexa device."""
# Class info
self.alexa_api = AlexaAPI(self, session, url)
self.update_devices = update_devices
# Device info
self._device = None
self._device_name = None
self._device_serial_number = None
self._device_type = None
self._device_family = None
self._device_owner_customer_id = None
self._software_version = None
self._available = None
self._capabilities = []
# Media
self._session = None
self._media_duration = None
self._media_image_url = None
self._media_title = None
self._media_pos = None
self._media_album_name = None
self._media_artist = None
self._media_player_state = None
self._media_is_muted = None
self._media_vol_level = None
self._previous_volume = None
self._source = None
self._source_list = []
self.refresh(device)
def _clear_media_details(self):
"""Set all Media Items to None."""
# General
self._media_duration = None
self._media_image_url = None
self._media_title = None
self._media_pos = None
self._media_album_name = None
self._media_artist = None
self._media_player_state = None
self._media_is_muted = None
self._media_vol_level = None
def refresh(self, device):
"""Refresh key device data."""
self._device = device
self._device_name = device['accountName']
self._device_family = device['deviceFamily']
self._device_type = device['deviceType']
self._device_serial_number = device['serialNumber']
self._device_owner_customer_id = device['deviceOwnerCustomerId']
self._software_version = device['softwareVersion']
self._available = device['online']
self._capabilities = device['capabilities']
self._bluetooth_state = device['bluetooth_state']
self._source = self._get_source()
self._source_list = self._get_source_list()
session = self.alexa_api.get_state()
self._clear_media_details()
# update the session if it exists; not doing relogin here
if session is not None:
self._session = session
if 'playerInfo' in self._session:
self._session = self._session['playerInfo']
if self._session['state'] is not None:
self._media_player_state = self._session['state']
self._media_pos = (self._session['progress']['mediaProgress']
if (self._session['progress'] is not None
and 'mediaProgress' in
self._session['progress'])
else None)
self._media_is_muted = (self._session['volume']['muted']
if (self._session['volume'] is not None
and 'muted' in
self._session['volume'])
else None)
self._media_vol_level = (self._session['volume']
['volume'] / 100
if(self._session['volume'] is not None
and 'volume' in
self._session['volume'])
else None)
self._media_title = (self._session['infoText']['title']
if (self._session['infoText'] is not None
and 'title' in
self._session['infoText'])
else None)
self._media_artist = (self._session['infoText']['subText1']
if (self._session['infoText'] is not None
and 'subText1' in
self._session['infoText'])
else None)
self._media_album_name = (self._session['infoText']['subText2']
if (self._session['infoText'] is not
None and 'subText2' in
self._session['infoText'])
else None)
self._media_image_url = (self._session['mainArt']['url']
if (self._session['mainArt'] is not
None and 'url' in
self._session['mainArt'])
else None)
self._media_duration = (self._session['progress']
['mediaLength']
if (self._session['progress'] is not
None and 'mediaLength' in
self._session['progress'])
else None)
@property
def source(self):
"""Return the current input source."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
def select_source(self, source):
"""Select input source."""
if source == 'Local Speaker':
self.alexa_api.disconnect_bluetooth()
self._source = 'Local Speaker'
elif self._bluetooth_state['pairedDeviceList'] is not None:
for devices in self._bluetooth_state['pairedDeviceList']:
if devices['friendlyName'] == source:
self.alexa_api.set_bluetooth(devices['address'])
self._source = source
def _get_source(self):
source = 'Local Speaker'
if self._bluetooth_state['pairedDeviceList'] is not None:
for device in self._bluetooth_state['pairedDeviceList']:
if device['connected'] is True:
return device['friendlyName']
return source
def _get_source_list(self):
sources = []
if self._bluetooth_state['pairedDeviceList'] is not None:
for devices in self._bluetooth_state['pairedDeviceList']:
sources.append(devices['friendlyName'])
return ['Local Speaker'] + sources
@property
def available(self):
"""Return the availability of the client."""
return self._available
@property
def unique_id(self):
"""Return the id of this Alexa client."""
return self.device_serial_number
@property
def name(self):
"""Return the name of the device."""
return self._device_name
@property
def device_serial_number(self):
"""Return the machine identifier of the device."""
return self._device_serial_number
@property
def device(self):
"""Return the device, if any."""
return self._device
@property
def session(self):
"""Return the session, if any."""
return self._session
@property
def state(self):
"""Return the state of the device."""
if self._media_player_state == 'PLAYING':
return STATE_PLAYING
elif self._media_player_state == 'PAUSED':
return STATE_PAUSED
elif self._media_player_state == 'IDLE':
return STATE_IDLE
return STATE_STANDBY
def update(self):
"""Get the latest details."""
self.update_devices(no_throttle=True)
@property
def media_content_type(self):
"""Return the content type of current playing media."""
if self.state in [STATE_PLAYING, STATE_PAUSED]:
return MEDIA_TYPE_MUSIC
return STATE_STANDBY
@property
def media_artist(self):
"""Return the artist of current playing media, music track only."""
return self._media_artist
@property
def media_album_name(self):
"""Return the album name of current playing media, music track only."""
return self._media_album_name
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self._media_duration
@property
def media_image_url(self):
"""Return the image URL of current playing media."""
return self._media_image_url
@property
def media_title(self):
"""Return the title of current playing media."""
return self._media_title
@property
def device_family(self):
"""Return the make of the device (ex. Echo, Other)."""
return self._device_family
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_ALEXA
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
if not (self.state in [STATE_PLAYING, STATE_PAUSED]
and self.available):
return
self.alexa_api.set_volume(volume)
self._media_vol_level = volume
@property
def volume_level(self):
"""Return the volume level of the client (0..1)."""
return self._media_vol_level
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
if self.volume_level == 0:
return True
return False
def mute_volume(self, mute):
"""Mute the volume.
Since we can't actually mute, we'll:
- On mute, store volume and set volume to 0
- On unmute, set volume to previously stored volume
"""
if not (self.state == STATE_PLAYING and self.available):
return
self._media_is_muted = mute
if mute:
self._previous_volume = self.volume_level
self.alexa_api.set_volume(0)
else:
if self._previous_volume is not None:
self.alexa_api.set_volume(self._previous_volume)
else:
self.alexa_api.set_volume(0.5)
def media_play(self):
"""Send play command."""
if not (self.state in [STATE_PLAYING, STATE_PAUSED]
and self.available):
return
self.alexa_api.play()
def media_pause(self):
"""Send pause command."""
if not (self.state in [STATE_PLAYING, STATE_PAUSED]
and self.available):
return
self.alexa_api.pause()
def turn_off(self):
"""Turn the client off."""
# Fake it since we can't turn the client off
self.media_pause()
def media_next_track(self):
"""Send next track command."""
if not (self.state in [STATE_PLAYING, STATE_PAUSED]
and self.available):
return
self.alexa_api.next()
def media_previous_track(self):
"""Send previous track command."""
if not (self.state in [STATE_PLAYING, STATE_PAUSED]
and self.available):
return
self.alexa_api.previous()
def send_tts(self, message):
"""Send TTS to Device NOTE: Does not work on WHA Groups."""
self.alexa_api.send_tts(message)
def play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player."""
if media_type == "music":
self.alexa_api.send_tts("Sorry, text to speech can only be called "
" with the media player alexa tts service")
else:
self.alexa_api.play_music(media_type, media_id)
@property
def device_state_attributes(self):
"""Return the scene state attributes."""
attr = {
'available': self._available,
}
return attr
class AlexaLogin():
"""Class to handle login connection to Alexa."""
def __init__(self, url, email, password, configpath, debug=False):
"""Set up initial connection and log in."""
self._url = url
self._email = email
self._password = password
self._session = None
self._data = None
self.status = {}
self._cookiefile = configpath("{}.pickle".format(ALEXA_DATA))
self._debugpost = configpath("{}post.html".format(ALEXA_DATA))
self._debugget = configpath("{}get.html".format(ALEXA_DATA))
self._lastreq = None
self._debug = debug
self.login_with_cookie()
def login_with_cookie(self):
"""Attempt to login after loading cookie."""
import pickle
cookies = None
if (self._cookiefile):
try:
_LOGGER.debug(
"Trying cookie from file {}".format(
self._cookiefile))
with open(self._cookiefile, 'rb') as myfile:
cookies = pickle.load(myfile)
_LOGGER.debug("cookie loaded: {}".format(cookies))
except Exception as ex:
template = ("An exception of type {0} occurred."
" Arguments:\n{1!r}")
message = template.format(type(ex).__name__, ex.args)
_LOGGER.debug(
"Error loading pickled cookie from {}: {}".format(
self._cookiefile, message))
self.login(cookies=cookies)
def reset_login(self):
"""Remove data related to existing login."""
self._session = None
self._data = None
self._lastreq = None
self.status = {}
def get_inputs(self, soup, searchfield={'name': 'signIn'}):
"""Parse soup for form with searchfield."""
data = {}
form = soup.find('form', searchfield)
for field in form.find_all('input'):
try:
data[field['name']] = ""
data[field['name']] = field['value']
except: # noqa: E722 pylint: disable=bare-except
pass
return data
def test_loggedin(self, cookies=None):
"""Function that will test the connection is logged in.
Attempts to get device list, and if unsuccessful login failed
"""
if self._session is None:
'''initiate session'''
self._session = requests.Session()
'''define session headers'''
self._session.headers = {
'User-Agent': ('Mozilla/5.0 (Windows NT 6.3; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/68.0.3440.106 Safari/537.36'),
'Accept': ('text/html,application/xhtml+xml, '
'application/xml;q=0.9,*/*;q=0.8'),
'Accept-Language': '*'
}
self._session.cookies = cookies
get_resp = self._session.get('https://alexa.' + self._url +
'/api/devices-v2/device')
# with open(self._debugget, mode='wb') as localfile:
# localfile.write(get_resp.content)
try:
from json.decoder import JSONDecodeError
from simplejson import JSONDecodeError as SimpleJSONDecodeError
# Need to catch both as Python 3.5 appears to use simplejson
except ImportError:
JSONDecodeError = ValueError
SimpleJSONDecodeError = ValueError
try:
get_resp.json()
except (JSONDecodeError, SimpleJSONDecodeError) as ex:
# ValueError is necessary for Python 3.5 for some reason
template = ("An exception of type {0} occurred."
" Arguments:\n{1!r}")
message = template.format(type(ex).__name__, ex.args)
_LOGGER.debug("Not logged in: {}".format(message))
return False
_LOGGER.debug("Logged in.")
return True
def login(self, cookies=None, captcha=None, securitycode=None,
claimsoption=None, verificationcode=None):
"""Login to Amazon."""
from bs4 import BeautifulSoup
import pickle
if (cookies is not None and self.test_loggedin(cookies)):
_LOGGER.debug("Using cookies to log in")
self.status = {}
self.status['login_successful'] = True
_LOGGER.debug("Log in successful with cookies")
return
else:
_LOGGER.debug("No valid cookies for log in; using credentials")
# site = 'https://www.' + self._url + '/gp/sign-in.html'
# use alexa site instead
site = 'https://alexa.' + self._url + '/api/devices-v2/device'
if self._session is None:
'''initiate session'''
self._session = requests.Session()
'''define session headers'''
self._session.headers = {
'User-Agent': ('Mozilla/5.0 (Windows NT 6.3; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/68.0.3440.106 Safari/537.36'),
'Accept': ('text/html,application/xhtml+xml, '
'application/xml;q=0.9,*/*;q=0.8'),
'Accept-Language': '*'
}
if self._lastreq is not None:
site = self._lastreq.url
_LOGGER.debug("Loaded last request to {} ".format(site))
html = self._lastreq.text
'''get BeautifulSoup object of the html of the login page'''
if self._debug:
with open(self._debugget, mode='wb') as localfile:
localfile.write(self._lastreq.content)
soup = BeautifulSoup(html, 'html.parser')
site = soup.find('form').get('action')
if site is None:
site = self._lastreq.url
elif site == 'verify':
import re
site = re.search(r'(.+)/(.*)',
self._lastreq.url).groups()[0] + "/verify"
if self._data is None:
resp = self._session.get(site)
self._lastreq = resp
if resp.history:
_LOGGER.debug("Get to {} was redirected to {}".format(
site,
resp.url))
self._session.headers['Referer'] = resp.url
else:
_LOGGER.debug("Get to {} was not redirected".format(site))
self._session.headers['Referer'] = site
html = resp.text
'''get BeautifulSoup object of the html of the login page'''
if self._debug:
with open(self._debugget, mode='wb') as localfile:
localfile.write(resp.content)
soup = BeautifulSoup(html, 'html.parser')
'''scrape login page to get all the inputs required for login'''
self._data = self.get_inputs(soup)
site = soup.find('form', {'name': 'signIn'}).get('action')
# _LOGGER.debug("Init Form Data: {}".format(self._data))
'''add username and password to the data for post request'''
'''check if there is an input field'''
if "email" in self._data:
self._data['email'] = self._email.encode('utf-8')
if "password" in self._data:
self._data['password'] = self._password.encode('utf-8')
if "rememberMe" in self._data:
self._data['rememberMe'] = "true".encode('utf-8')
status = {}
_LOGGER.debug(("Preparing post to {} Captcha: {}"
" SecurityCode: {} Claimsoption: {} "
"VerificationCode: {}").format(
site,
captcha,
securitycode,
claimsoption,
verificationcode
))
if (captcha is not None and 'guess' in self._data):
self._data['guess'] = captcha.encode('utf-8')
if (securitycode is not None and 'otpCode' in self._data):
self._data['otpCode'] = securitycode.encode('utf-8')
self._data['rememberDevice'] = ""
if (claimsoption is not None and 'option' in self._data):
self._data['option'] = claimsoption.encode('utf-8')
if (verificationcode is not None and 'code' in self._data):
self._data['code'] = verificationcode.encode('utf-8')
self._session.headers['Content-Type'] = ("application/x-www-form-"
"urlencoded; charset=utf-8")
self._data.pop('', None)
if self._debug:
_LOGGER.debug("Cookies: {}".format(self._session.cookies))
_LOGGER.debug("Submit Form Data: {}".format(self._data))
_LOGGER.debug("Header: {}".format(self._session.headers))
'''submit post request with username/password and other needed info'''
post_resp = self._session.post(site, data=self._data)
self._session.headers['Referer'] = site
self._lastreq = post_resp
if self._debug:
with open(self._debugpost, mode='wb') as localfile:
localfile.write(post_resp.content)
post_soup = BeautifulSoup(post_resp.content, 'html.parser')
login_tag = post_soup.find('form', {'name': 'signIn'})
captcha_tag = post_soup.find(id="auth-captcha-image")
'''another login required and no captcha request? try once more.
This is a necessary hack as the first attempt always fails.
TODO: Figure out how to remove this hack
'''
if (login_tag is not None and captcha_tag is None):
login_url = login_tag.get("action")
_LOGGER.debug("Performing second login to: {}".format(
login_url))
post_resp = self._session.post(login_url,
data=self._data)
if self._debug:
with open(self._debugpost, mode='wb') as localfile:
localfile.write(post_resp.content)
post_soup = BeautifulSoup(post_resp.content, 'html.parser')
login_tag = post_soup.find('form', {'name': 'signIn'})
captcha_tag = post_soup.find(id="auth-captcha-image")
securitycode_tag = post_soup.find(id="auth-mfa-otpcode")
errorbox = (post_soup.find(id="auth-error-message-box")
if post_soup.find(id="auth-error-message-box") else
post_soup.find(id="auth-warning-message-box"))
claimspicker_tag = post_soup.find('form', {'name': 'claimspicker'})
verificationcode_tag = post_soup.find('form', {'action': 'verify'})
'''pull out Amazon error message'''
if errorbox:
error_message = errorbox.find('h4').string
for li in errorbox.findAll('li'):
error_message += li.find('span').string
_LOGGER.debug("Error message: {}".format(error_message))
status['error_message'] = error_message
if captcha_tag is not None:
_LOGGER.debug("Captcha requested")
status['captcha_required'] = True
status['captcha_image_url'] = captcha_tag.get('src')
self._data = self.get_inputs(post_soup)
elif securitycode_tag is not None:
_LOGGER.debug("2FA requested")
status['securitycode_required'] = True
self._data = self.get_inputs(post_soup, {'id': 'auth-mfa-form'})
elif claimspicker_tag is not None:
claims_message = ""
options_message = ""
for div in claimspicker_tag.findAll('div', 'a-row'):
claims_message += "{}\n".format(div.string)
for label in claimspicker_tag.findAll('label'):
value = (label.find('input')['value']) if label.find(
'input') else ""
message = (label.find('span').string) if label.find(
'span') else ""
valuemessage = ("Option: {} = `{}`.\n".format(
value, message)) if value != "" else ""
options_message += valuemessage
_LOGGER.debug("Verification method requested: {}".format(
claims_message, options_message))
status['claimspicker_required'] = True
status['claimspicker_message'] = options_message
self._data = self.get_inputs(post_soup, {'name': 'claimspicker'})
elif verificationcode_tag is not None:
_LOGGER.debug("Verification code requested:")
status['verificationcode_required'] = True
self._data = self.get_inputs(post_soup, {'action': 'verify'})
elif login_tag is not None:
login_url = login_tag.get("action")
_LOGGER.debug("Another login requested to: {}".format(
login_url))
status['login_failed'] = True
else:
_LOGGER.debug("Captcha/2FA not requested; confirming login.")
if self.test_loggedin():
_LOGGER.debug("Login confirmed; saving cookie to {}".format(
self._cookiefile))
status['login_successful'] = True
with open(self._cookiefile, 'wb') as myfile:
try:
pickle.dump(self._session.cookies, myfile)
except Exception as ex:
template = ("An exception of type {0} occurred."
" Arguments:\n{1!r}")
message = template.format(type(ex).__name__, ex.args)
_LOGGER.debug(
"Error saving pickled cookie to {}: {}".format(
self._cookiefile,
message))
else:
_LOGGER.debug("Login failed; check credentials")
status['login_failed'] = True
self.status = status
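# Hypothetical usage sketch (not part of the original component): one way a
# caller could drive AlexaLogin and branch on the ``status`` dict populated by
# login() above. The ``prompt_for`` callable is invented for illustration; the
# status keys are the ones login() actually sets.
def _example_login_flow(login, prompt_for):
    """Resolve one interactive login step and report whether login succeeded."""
    status = login.status
    if status.get('login_successful'):
        return True
    if status.get('captcha_required'):
        # The captcha image lives at status['captcha_image_url'].
        login.login(captcha=prompt_for('captcha: ' + status['captcha_image_url']))
    elif status.get('securitycode_required'):
        login.login(securitycode=prompt_for('2FA code'))
    elif status.get('claimspicker_required'):
        login.login(claimsoption=prompt_for(status['claimspicker_message']))
    elif status.get('verificationcode_required'):
        login.login(verificationcode=prompt_for('verification code'))
    elif status.get('login_failed'):
        return False
    return login.status.get('login_successful', False)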
class AlexaAPI():
"""Class for accessing Alexa."""
def __init__(self, device, session, url):
"""Initialize Alexa device."""
self._device = device
self._session = session
self._url = 'https://alexa.' + url
csrf = self._session.cookies.get_dict()['csrf']
self._session.headers['csrf'] = csrf
def _post_request(self, uri, data):
try:
self._session.post(self._url + uri, json=data)
except Exception as ex:
template = ("An exception of type {0} occurred."
" Arguments:\n{1!r}")
message = template.format(type(ex).__name__, ex.args)
_LOGGER.error("An error occured accessing the API: {}".format(
message))
def _get_request(self, uri, data=None):
try:
return self._session.get(self._url + uri, json=data)
except Exception as ex:
template = ("An exception of type {0} occurred."
" Arguments:\n{1!r}")
message = template.format(type(ex).__name__, ex.args)
_LOGGER.error("An error occured accessing the API: {}".format(
message))
return None
def play_music(self, provider_id, search_phrase):
"""Play Music based on search."""
data = {
"behaviorId": "PREVIEW",
"sequenceJson": "{\"@type\": \
\"com.amazon.alexa.behaviors.model.Sequence\", \
\"startNode\":{\"@type\": \
\"com.amazon.alexa.behaviors.model.OpaquePayloadOperationNode\", \
\"type\":\"Alexa.Music.PlaySearchPhrase\",\"operationPayload\": \
{\"deviceType\":\"" + self._device._device_type + "\", \
\"deviceSerialNumber\":\"" + self._device.unique_id +
"\",\"locale\":\"en-US\", \
\"customerId\":\"" + self._device._device_owner_customer_id +
"\", \"searchPhrase\": \"" + search_phrase + "\", \
\"sanitizedSearchPhrase\": \"" + search_phrase + "\", \
\"musicProviderId\": \"" + provider_id + "\"}}}",
"status": "ENABLED"
}
self._post_request('/api/behaviors/preview',
data=data)
def send_tts(self, message):
"""Send message for TTS at speaker."""
data = {
"behaviorId": "PREVIEW",
"sequenceJson": "{\"@type\": \
\"com.amazon.alexa.behaviors.model.Sequence\", \
\"startNode\":{\"@type\": \
\"com.amazon.alexa.behaviors.model.OpaquePayloadOperationNode\", \
\"type\":\"Alexa.Speak\",\"operationPayload\": \
{\"deviceType\":\"" + self._device._device_type + "\", \
\"deviceSerialNumber\":\"" + self._device.unique_id +
"\",\"locale\":\"en-US\", \
\"customerId\":\"" + self._device._device_owner_customer_id +
"\", \"textToSpeak\": \"" + message + "\"}}}",
"status": "ENABLED"
}
self._post_request('/api/behaviors/preview',
data=data)
def set_media(self, data):
"""Select the media player."""
self._post_request('/api/np/command?deviceSerialNumber=' +
self._device.unique_id + '&deviceType=' +
self._device._device_type, data=data)
def previous(self):
"""Play previous."""
self.set_media({"type": "PreviousCommand"})
def next(self):
"""Play next."""
self.set_media({"type": "NextCommand"})
def pause(self):
"""Pause."""
self.set_media({"type": "PauseCommand"})
def play(self):
"""Play."""
self.set_media({"type": "PlayCommand"})
def set_volume(self, volume):
"""Set volume."""
self.set_media({"type": "VolumeLevelCommand",
"volumeLevel": volume*100})
def get_state(self):
"""Get state."""
try:
response = self._get_request('/api/np/player?deviceSerialNumber=' +
self._device.unique_id +
'&deviceType=' +
self._device._device_type +
'&screenWidth=2560')
return response.json()
except Exception as ex:
template = ("An exception of type {0} occurred."
" Arguments:\n{1!r}")
message = template.format(type(ex).__name__, ex.args)
_LOGGER.error("An error occured accessing the API: {}".format(
message))
return None
@staticmethod
def get_bluetooth(url, session):
"""Get paired bluetooth devices."""
try:
response = session.get('https://alexa.' + url +
'/api/bluetooth?cached=false')
return response.json()
except Exception as ex:
template = ("An exception of type {0} occurred."
" Arguments:\n{1!r}")
message = template.format(type(ex).__name__, ex.args)
_LOGGER.error("An error occured accessing the API: {}".format(
message))
return None
def set_bluetooth(self, mac):
"""Pair with bluetooth device with mac address."""
self._post_request('/api/bluetooth/pair-sink/' +
self._device._device_type + '/' +
self._device.unique_id,
data={"bluetoothDeviceAddress": mac})
def disconnect_bluetooth(self):
"""Disconnect all bluetooth devices."""
self._post_request('/api/bluetooth/disconnect-sink/' +
self._device._device_type + '/' +
self._device.unique_id, data=None)
@staticmethod
def get_devices(url, session):
"""Identify all Alexa devices."""
try:
response = session.get('https://alexa.' + url +
'/api/devices-v2/device')
return response.json()['devices']
except Exception as ex:
template = ("An exception of type {0} occurred."
" Arguments:\n{1!r}")
message = template.format(type(ex).__name__, ex.args)
_LOGGER.error("An error occured accessing the API: {}".format(
message))
return None
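# Illustrative sketch (an assumption, not original code): fetching the raw
# device list with the static helper above once a login has succeeded. The
# ``amazon_url`` argument is the same domain string passed to AlexaLogin
# (for example 'amazon.com'); ``session`` is the logged-in requests session.
def _example_list_devices(session, amazon_url):
    """Return the account's Alexa devices, or an empty list on failure."""
    devices = AlexaAPI.get_devices(amazon_url, session)
    return devices if devices is not None else []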
|
|
import unittest
from ndio.remote.neurodata import neurodata as nd
import numpy
import json
import test_settings
class TestResourcesApi(unittest.TestCase):
def setUp(self):
self.token_user = test_settings.NEURODATA
hostname = test_settings.HOSTNAME
self.nd = nd(user_token=self.token_user, hostname=hostname)
def test_create_dataset(self):
result = self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0)
self.assertEqual(result, True)
self.nd.delete_dataset('test')
result = self.nd.create_dataset(
'test', 1, 1, 1, 1.0, 1.0, 1.0, is_public=1)
self.assertEqual(result, True)
self.nd.delete_dataset('test')
def test_get_dataset(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
result = self.nd.get_dataset('test')
compare_dict = {
u'yimagesize': 1,
u'ximagesize': 1,
u'zoffset': 0,
u'scalingoption': 0,
u'zimagesize': 1,
u'yoffset': 0,
u'dataset_description': u'',
u'scalinglevels': 0,
u'user': 1,
u'yvoxelres': 1.0,
u'zvoxelres': 1.0,
u'dataset_name': u'test',
u'public': 0,
u'xoffset': 0,
u'xvoxelres': 1.0
}
for key in compare_dict:
self.assertEqual(result[key], compare_dict[key])
self.nd.delete_dataset('test')
def test_list_datasets(self):
test_dataset_names = [u'test1', u'test2', u'test3']
self.nd.create_dataset('test1', 1, 1, 1, 1.0, 1.0, 1.0, is_public=1)
self.nd.create_dataset('test2', 1, 1, 1, 1.0, 1.0, 1.0, is_public=1)
self.nd.create_dataset('test3', 1, 1, 1, 1.0, 1.0, 1.0, is_public=1)
test_dataset_list = self.nd.list_datasets(False)
for name in test_dataset_names:
self.assertEqual(name in test_dataset_list, True)
self.nd.delete_dataset('test1')
self.nd.delete_dataset('test2')
self.nd.delete_dataset('test3')
def test_delete_dataset(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
result = self.nd.delete_dataset('test')
self.assertEqual(result, True)
def test_create_project(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
result = self.nd.create_project(
'testp', 'test', 'localhost', 0)
self.assertEqual(result, True)
self.nd.delete_project('testp', 'test')
self.nd.delete_dataset('test')
def test_get_project(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
self.nd.create_project(
'testp', 'test', 'localhost', 1)
result = self.nd.get_project('testp', 'test')
compare_dict = {u'kvengine': u'MySQL',
u'mdengine': u'MySQL',
u'project_name': u'testp',
u'dataset': u'test',
u'host': u'localhost',
u'project_description': u'',
u'user': 1,
u'public': 1,
u'kvserver': u'localhost'}
for key in compare_dict:
self.assertEqual(result[key], compare_dict[key])
self.nd.delete_project('testp', 'test')
self.nd.delete_dataset('test')
def test_delete_project(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
self.nd.create_project(
'testp', 'test', 'localhost', 1)
result = self.nd.delete_project('testp', 'test')
self.assertEqual(result, True)
self.nd.delete_dataset('test')
def test_list_projects(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
test_project_names = [u'test1p', u'test2p', u'test3p']
self.nd.create_project(
'test1p', 'test', 'localhost', 1)
self.nd.create_project(
'test2p', 'test', 'localhost', 1)
self.nd.create_project(
'test3p', 'test', 'localhost', 1)
test_project_list = self.nd.list_projects('test')
for name in test_project_names:
self.assertEqual(name in test_project_list, True)
self.nd.delete_project('test1p', 'test')
self.nd.delete_project('test2p', 'test')
self.nd.delete_project('test3p', 'test')
self.nd.delete_dataset('test')
def test_create_channel(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
self.nd.create_project(
'testp', 'test', 'localhost', 1)
result = self.nd.create_channel(
'testc', 'testp', 'test', 'image', 'uint8', 0, 500, 0)
self.assertEqual(result, True)
self.nd.delete_channel('testc', 'testp', 'test')
self.nd.delete_project('testp', 'test')
self.nd.delete_dataset('test')
def test_get_channel(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
self.nd.create_project(
'testp', 'test', 'localhost', 1)
self.nd.create_channel('testc', 'testp', 'test',
'image', 'uint8', 0, 500, 0)
result = self.nd.get_channel('testc', 'testp', 'test')
compare_dict = {u'channel_description': u'',
u'endwindow': 500,
u'channel_name': u'testc',
u'default': False,
u'project': u'testp',
u'startwindow': 0,
u'channel_type': u'image',
u'header': u'',
u'readonly': 0,
u'propagate': 0,
u'starttime': 0,
u'exceptions': 0,
u'channel_datatype': u'uint8',
u'endtime': 0,
u'resolution': 0}
for key in compare_dict:
self.assertEqual(result[key], compare_dict[key])
self.nd.delete_channel('testc', 'testp', 'test')
self.nd.delete_project('testp', 'test')
self.nd.delete_dataset('test')
def test_delete_channel(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
self.nd.create_project(
'testp', 'test', 'localhost', 1)
self.nd.create_channel('testc', 'testp', 'test',
'image', 'uint8', 0, 500, 0)
result = self.nd.delete_channel('testc', 'testp', 'test')
self.assertEqual(result, True)
self.nd.delete_project('testp', 'test')
self.nd.delete_dataset('test')
def test_create_token(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
self.nd.create_project(
'testp', 'test', 'localhost', 1)
result = self.nd.create_token('testt', 'testp', 'test', 1)
self.assertEqual(result, True)
self.nd.delete_token('testt', 'testp', 'test')
self.nd.delete_project('testp', 'test')
self.nd.delete_dataset('test')
def test_get_token(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
self.nd.create_project(
'testp', 'test', 'localhost', 1)
self.nd.create_token('testt', 'testp', 'test', 1)
result = self.nd.get_token('testt', 'testp', 'test')
compare_dict = {u'token_name': 'testt',
u'public': 1}
for key in compare_dict:
self.assertEqual(result[key], compare_dict[key])
self.nd.delete_token('testt', 'testp', 'test')
self.nd.delete_project('testp', 'test')
self.nd.delete_dataset('test')
def test_delete_token(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
self.nd.create_project(
'testp', 'test', 'localhost', 1)
self.nd.create_token('testt', 'testp', 'test', 1)
result = self.nd.delete_token('testt', 'testp', 'test')
self.assertEqual(result, True)
self.nd.delete_project('testp', 'test')
self.nd.delete_dataset('test')
def test_list_token(self):
self.nd.create_dataset('test', 1, 1, 1, 1.0, 1.0, 1.0, 0)
test_token_names = [u'test1t', u'test2t', u'test3t']
self.nd.create_project(
'test1p', 'test', 'localhost', 1)
self.nd.create_project(
'test2p', 'test', 'localhost', 1)
self.nd.create_token('test1t', 'test1p', 'test', 1)
self.nd.create_token('test2t', 'test1p', 'test', 1)
self.nd.create_token('test3t', 'test2p', 'test', 1)
test_token_list = self.nd.list_tokens()
for name in test_token_names:
self.assertEqual(name in test_token_list, True)
self.nd.delete_token('test1t', 'test1p', 'test')
self.nd.delete_token('test2t', 'test1p', 'test')
self.nd.delete_token('test3t', 'test2p', 'test')
self.nd.delete_project('test1p', 'test')
self.nd.delete_project('test2p', 'test')
self.nd.delete_dataset('test')
if __name__ == '__main__':
unittest.main()
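# These tests import a local ``test_settings`` module that is not part of this
# file. A minimal sketch of what it is assumed to contain (the real token and
# hostname are site-specific placeholders here):
#
#     NEURODATA = 'your-neurodata-api-token'
#     HOSTNAME = 'localhost:8000'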
|
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import random
import time
from collections import Counter, defaultdict
from functools import reduce
from pyqrllib.pyqrllib import bin2hstr, sha2_256
from twisted.internet import reactor
import qrl.core.Transaction_subtypes
from qrl.core import logger, config, fork
from qrl.core.GenesisBlock import GenesisBlock
from qrl.core.fork import fork_recovery
from qrl.core.messagereceipt import MessageReceipt
from qrl.core.nstate import NState
from qrl.core.Transaction import StakeTransaction
from qrl.crypto.hashchain import hashchain
from qrl.crypto.misc import sha256
class NodeState:
def __init__(self):
self.state = NState.unsynced
self.epoch_diff = -1
class POS:
def __init__(self, chain, p2pFactory, nodeState, ntp):
self.master_mr = MessageReceipt()
self.nodeState = nodeState
self.ntp = ntp
self.chain = chain
self.r1_time_diff = defaultdict(list)
self.r2_time_diff = defaultdict(list)
self.pos_blocknum = 0
self.pos_callLater = None
self.incoming_blocks = {}
self.last_pos_cycle = 0
self.last_selected_height = 0
self.last_bk_time = 0
self.last_pb_time = 0
self.next_header_hash = None
self.next_block_number = None
self.fmbh_allowed_peers = {}
self.fmbh_blockhash_peers = {}
self.p2pFactory = p2pFactory
def update_node_state(self, state):
self.nodeState.state = state
logger.info('Status changed to %s', self.nodeState.state)
if self.nodeState.state == NState.synced:
self.nodeState.epoch_diff = 0
self.last_pos_cycle = time.time()
self.restart_post_block_logic()
elif self.nodeState.state == NState.unsynced:
self.last_bk_time = time.time()
self.restart_unsynced_logic()
elif self.nodeState.state == NState.forked:
self.stop_post_block_logic()
elif self.nodeState.state == NState.syncing:
self.last_pb_time = time.time()
def stop_monitor_bk(self):
try:
reactor.monitor_bk.cancel()
except Exception: # No need to log this exception
pass
def restart_monitor_bk(self, delay=60):
self.stop_monitor_bk()
reactor.monitor_bk = reactor.callLater(delay, self.monitor_bk)
def monitor_bk(self):
time_diff = time.time() - self.last_pos_cycle
if (self.nodeState.state == NState.synced or self.nodeState.state == NState.unsynced) and 90 < time_diff:
if self.nodeState.state == NState.synced:
self.stop_post_block_logic()
self.reset_everything()
self.update_node_state(NState.unsynced)
self.epoch_diff = -1
elif time.time() - self.last_bk_time > 120:
self.last_pos_cycle = time.time()
logger.info(' POS cycle activated by monitor_bk() ')
self.update_node_state(NState.synced)
if self.nodeState.state == NState.syncing and time.time() - self.last_pb_time > 60:
self.stop_post_block_logic()
self.reset_everything()
self.update_node_state(NState.unsynced)
self.epoch_diff = -1
reactor.monitor_bk = reactor.callLater(60, self.monitor_bk)
def peers_blockheight_headerhash(self):
for peer in self.p2pFactory.peers:
peer.fetch_headerhash_n(self.chain.m_blockheight())
def check_fork_status(self):
current_height = self.chain.m_blockheight()
block_hash_counter = Counter()
for peer in self.p2pFactory.peers:
if current_height in list(peer.blocknumber_headerhash.keys()):
block_hash_counter[peer.blocknumber_headerhash[current_height]] += 1
blockhash = block_hash_counter.most_common(1)
if blockhash:
blockhash = blockhash[0][0]
actual_blockhash = self.chain.m_get_block(current_height).blockheader.headerhash
if actual_blockhash != blockhash:
logger.info('Blockhash did not match in peers_blockheight()')
logger.info('Local blockhash - %s', actual_blockhash)
logger.info('Consensus blockhash - %s', blockhash)
fork_recovery(current_height, self.chain, self.randomize_headerhash_fetch)
return True
return
def peers_blockheight(self):
if self.nodeState.state == NState.syncing:
return
if self.check_fork_status():
return
block_height_counter = Counter()
for peer in self.p2pFactory.peers:
block_height_counter[peer.blockheight] += 1
blocknumber = block_height_counter.most_common(1)
if not blocknumber:
return # TODO : Re-Schedule with delay
blocknumber = blocknumber[0][0]
if blocknumber > self.chain.height(): # chain.m_blockheight(): len(chain.m_blockchain)
# pending_blocks['target'] = blocknumber
logger.info('Calling downloader from peers_blockheight due to no POS CYCLE %s', blocknumber)
logger.info('Download block from %s to %s', self.chain.height() + 1, blocknumber)
self.last_pb_time = time.time()
self.update_node_state(NState.syncing)
self.randomize_block_fetch(self.chain.height() + 1)
return
def schedule_peers_blockheight(self, delay=100):
try:
reactor.peers_blockheight.cancel()
except Exception: # No need to log this exception
pass
reactor.peers_blockheight = reactor.callLater(delay, self.peers_blockheight)
try:
reactor.peers_blockheight_headerhash.cancel()
except Exception:  # No need to log this exception
pass
reactor.peers_blockheight_headerhash = reactor.callLater(70, self.peers_blockheight_headerhash)
# pos functions. an asynchronous loop.
# first block 1 is created with the stake list for epoch 0 decided from circulated st transactions
def pre_pos_1(self, data=None): # triggered after genesis for block 1..
logger.info('pre_pos_1')
# are we a staker in the stake list?
if self.chain.mining_address not in self.chain.m_blockchain[0].stake_list:
logger.info('%s %s', self.chain.mining_address, self.chain.m_blockchain[0].stake_list)
return
logger.info('mining address: %s in the genesis.stake_list', self.chain.mining_address)
xmss = self.chain.wallet.address_bundle[0].xmss
tmphc = hashchain(xmss.get_seed_private(), epoch=0)
self.chain.hash_chain = tmphc.hashchain
self.chain.block_chain_buffer.hash_chain[0] = tmphc.hashchain
tmpbalance = self.chain.state.balance(self.chain.mining_address)
slave_xmss = self.chain.block_chain_buffer.get_slave_xmss(0)
if not slave_xmss:
logger.info('Waiting for SLAVE XMSS to be done')
reactor.callLater(5, self.pre_pos_1)
return
signing_xmss = self.chain.wallet.address_bundle[0].xmss
st = StakeTransaction.create(blocknumber=0,
xmss=signing_xmss,
slavePK=slave_xmss.pk(),
finalized_blocknumber=0,
finalized_headerhash=sha2_256(config.dev.genesis_prev_headerhash.encode()),
hashchain_terminator=tmphc.hc_terminator,
balance=tmpbalance)
st.sign(signing_xmss)
self.chain.add_tx_to_pool(st)
# send the stake tx to generate hashchain terminators for the staker addresses..
self.p2pFactory.send_st_to_peers(st)
logger.info('await delayed call to build staker list from genesis')
reactor.callLater(5, self.pre_pos_2, st)
def pre_pos_2(self, data=None):
logger.info('pre_pos_2')
if self.chain.height() >= 1:
return
# assign hash terminators to addresses and generate a temporary stake list ordered by st.hash..
tmp_list = []
for tx in self.chain.transaction_pool:
if tx.subtype == qrl.core.Transaction_subtypes.TX_SUBTYPE_STAKE:
if tx.txfrom in self.chain.m_blockchain[0].stake_list:
tmp_list.append([tx.txfrom, tx.hash, 0, GenesisBlock().get_info()[tx.txfrom],
tx.slave_public_key])
self.chain.state.stake_validators_list.add_sv(tx, 0)
self.chain.block_chain_buffer.epoch_seed = self.chain.state.calc_seed(tmp_list)
# TODO : Needed to be reviewed later
self.chain.stake_list = sorted(tmp_list,
key=lambda staker: self.chain.score(stake_address=staker[0],
reveal_one=bin2hstr(sha256(str(
reduce(lambda set1, set2: set1 + set2,
tuple(staker[1]))).encode())),
balance=staker[3],
seed=self.chain.block_chain_buffer.epoch_seed))
self.chain.block_chain_buffer.epoch_seed = format(self.chain.block_chain_buffer.epoch_seed, 'x')
logger.info('genesis stakers ready = %s / %s', len(self.chain.stake_list), config.dev.minimum_required_stakers)
logger.info('node address: %s', self.chain.mining_address)
if len(self.chain.stake_list) < config.dev.minimum_required_stakers: # stake pool still not full..reloop..
self.p2pFactory.send_st_to_peers(data)
logger.info('waiting for stakers.. retry in 5s')
reactor.callID = reactor.callLater(5, self.pre_pos_2, data)
return
if self.chain.mining_address == self.chain.stake_list[0][0]:
logger.info('designated to create block 1: building block..')
tmphc = hashchain(self.chain.wallet.address_bundle[0].xmss.get_seed_private())
# create the genesis block 2 here..
reveal_hash = self.chain.select_hashchain(self.chain.mining_address,
tmphc.hashchain,
blocknumber=1)
b = self.chain.m_create_block(reveal_hash[-2])
self.pre_block_logic(b)
else:
logger.info('await block creation by stake validator: %s', self.chain.stake_list[0][0])
self.last_bk_time = time.time()
self.restart_unsynced_logic()
return
def process_transactions(self, num):
tmp_num = num
for tx in self.chain.pending_tx_pool:
tmp_num -= 1
tx_peer = tx[1]
tx = tx[0]
if not tx.validate_tx():
logger.info('>>>TX %s failed validate_tx', tx.txhash)
continue
block_chain_buffer = self.chain.block_chain_buffer
tx_state = block_chain_buffer.get_stxn_state(blocknumber=block_chain_buffer.height(),
addr=tx.txfrom)
isValidState = tx.state_validate_tx(
tx_state=tx_state,
transaction_pool=self.chain.transaction_pool
)
if not isValidState:
logger.info('>>>TX %s failed state_validate', tx.txhash)
continue
logger.info('>>>TX - %s from - %s relaying..', tx.txhash, tx_peer.transport.getPeer().host)
self.chain.add_tx_to_pool(tx)
txn_msg = tx_peer.wrap_message('TX', tx.to_json())
for peer in tx_peer.factory.peer_connections:
if peer != tx_peer:
peer.transport.write(txn_msg)
for i in range(num - tmp_num):
del self.chain.pending_tx_pool[0]
del self.chain.pending_tx_pool_hash[0]
# create new block..
def create_new_block(self, reveal_hash, last_block_number):
logger.info('create_new_block #%s', (last_block_number + 1))
block_obj = self.chain.create_stake_block(reveal_hash, last_block_number)
return block_obj
def reset_everything(self, data=None):
logger.info('** resetting loops and emptying chain.stake_reveal_one and chain.expected_winner ')
for r in self.chain.stake_reveal_one:
msg_hash = r[5]
self.master_mr.deregister(msg_hash, 'R1')
del self.chain.stake_reveal_one[:]
return
def filter_reveal_one_two(self, blocknumber=None):
if not blocknumber:
blocknumber = self.chain.m_blockchain[-1].blockheader.blocknumber
self.chain.stake_reveal_one = [s for s in self.chain.stake_reveal_one if s[2] > blocknumber]
return
# TODO: Incomplete fn, use to select the maximum blockheight by consensus
def select_blockheight_by_consensus(self):
block_height_counter = Counter()
# for identity in self.fmbh_allowed_peers:
# block_height_counter[s[2]] += 1
target_block_height = block_height_counter.most_common(1)
if len(target_block_height) == 0:
return None
last_selected_height = target_block_height[0][0]
return last_selected_height
'''
Unsynced Logic
1. Request the maximum blockheight and pass block number X
2. Peers respond with their chain height, headerhash, and the headerhash of block number X
3. The unsynced node selects the most common chain height and matches the headerhash of block number X
4. If the headerhash of block number X doesn't match, change state to Forked
5. If the headerhash of block number X matches, download blocks from the selected peers
'''
def restart_unsynced_logic(self, delay=0):
try:
reactor.unsynced_logic.cancel()
except Exception: # No need to log this exception
pass
reactor.unsynced_logic = reactor.callLater(delay, self.unsynced_logic)
def unsynced_logic(self):
if self.nodeState.state == NState.synced:
return
self.fmbh_blockhash_peers = {}
self.fmbh_allowed_peers = {}
for peer in self.p2pFactory.peer_connections:
self.fmbh_allowed_peers[peer.conn_identity] = None
peer.fetch_FMBH()
reactor.unsynced_logic = reactor.callLater(20, self.start_download)
def start_download(self):
# add peers and their identity to requested list
# FMBH
if self.nodeState.state == NState.synced:
return
logger.info('Checking Download..')
'''
global fmbh_blockhash_peers
max_height = None
selected_blockhash = None
for blockheaderhash in fmbh_blockhash_peers:
if fmbh_blockhash_peers[blockheaderhash]['blocknumber']>max_height:
max_height = fmbh_blockhash_peers[blockheaderhash]['blocknumber']
selected_blockhash = blockheaderhash
for peer in fmbh_blockhash_peers[selected_blockhash]['peers']:
f.target_peers = {}
f.target_peers[peer.conn_identity] = peer
if max_height == None or max_height<=chain.height():
chain.state.update(NState.synced)
return
chain.state.update(NState.syncing)
pending_blocks['start_block'] = chain.m_blockchain[-1].blockheader.blocknumber
pending_blocks['target'] = fmbh_blockhash_peers[selected_blockhash]['blocknumber']
pending_blocks['headerhash'] = selected_blockhash
randomize_block_fetch(chain.height() + 1)
'''
tmp_max = -1
max_headerhash = None
for headerhash in self.fmbh_blockhash_peers:
if self.fmbh_blockhash_peers[headerhash]['blocknumber'] > self.chain.height():
if len(self.fmbh_blockhash_peers[headerhash]['peers']) > tmp_max:
tmp_max = len(self.fmbh_blockhash_peers[headerhash]['peers'])
max_headerhash = headerhash
# Adding all peers
# TODO only trusted peer
# for peer in self.p2pFactory.peers:
if not max_headerhash:
logger.info('No peers responded FMBH request')
return
for peer in self.fmbh_blockhash_peers[max_headerhash]['peers']:
self.p2pFactory.target_peers[peer.conn_identity] = peer
self.update_node_state(NState.syncing)
self.randomize_block_fetch(self.chain.height() + 1)
def pre_block_logic(self, block):
if len(self.chain.m_blockchain) == 0:
self.chain.m_read_chain()
blocknumber = block.blockheader.blocknumber
chain_buffer_height = self.chain.block_chain_buffer.height()
last_block_before = self.chain.block_chain_buffer.get_last_block()
if blocknumber <= self.chain.height():
return False
if self.nodeState.state == NState.synced:
if not self.chain.block_chain_buffer.add_block(block):
return
elif chain_buffer_height + 1 == blocknumber:
if blocknumber > 1:
if not self.chain.block_chain_buffer.add_block(block):
return
elif blocknumber == 1:
if not self.chain.add_block_mainchain(block):
return
self.update_node_state(NState.synced)
else:
self.chain.block_chain_buffer.add_pending_block(block)
if self.nodeState.state == NState.synced:
last_block_after = self.chain.block_chain_buffer.get_last_block()
self.last_pos_cycle = time.time()
self.p2pFactory.send_block_to_peers(block)
if last_block_before.blockheader.headerhash != last_block_after.blockheader.headerhash:
self.schedule_pos(blocknumber + 1)
return True
def schedule_pos(self, blocknumber):
if self.nodeState.state == NState.synced:
if self.pos_callLater and self.pos_callLater.active():
if blocknumber > self.pos_blocknum:
return
self.restart_post_block_logic(blocknumber)
def stop_post_block_logic(self):
try:
self.pos_callLater.cancel()
except Exception: # No need to log this exception
pass
def restart_post_block_logic(self, blocknumber=-1, delay=None):
if blocknumber == -1:
blocknumber = self.chain.block_chain_buffer.height() + 1
if not delay:
last_block = self.chain.block_chain_buffer.get_block_n(blocknumber - 1)
last_block_timestamp = last_block.blockheader.timestamp
curr_timestamp = int(self.ntp.getTime())
delay = max(0, last_block_timestamp + config.dev.minimum_minting_delay - curr_timestamp)
self.stop_post_block_logic()
self.pos_callLater = reactor.callLater(delay,
self.post_block_logic,
blocknumber=blocknumber)
self.pos_blocknum = blocknumber
def create_next_block(self, blocknumber, entry_blocknumber):
if not self.chain.block_chain_buffer.get_slave_xmss(blocknumber):
return
hash_chain = self.chain.block_chain_buffer.hash_chain_get(blocknumber)
my_reveal = hash_chain[::-1][blocknumber - entry_blocknumber]
block = self.create_new_block(my_reveal,
blocknumber - 1)
self.pre_block_logic(block) # broadcast this block
def post_block_logic(self, blocknumber):
"""
In the post-block logic we initiate the next POS cycle:
send the ST, reset POS flags and remove unnecessary
messages from chain.stake_reveal_one and _two.
:return:
"""
if self.p2pFactory.stake:
stake_list = self.chain.block_chain_buffer.stake_list_get(blocknumber)
if self.chain.mining_address not in stake_list:
self.make_st_tx(blocknumber)
stake_list = self.chain.block_chain_buffer.stake_list_get(blocknumber)
delay = config.dev.minimum_minting_delay
if self.chain.mining_address in stake_list:
if stake_list[self.chain.mining_address].is_banned:
logger.warning('You have been banned.')
else:
entry_blocknumber = stake_list[self.chain.mining_address].entry_blocknumber
self.create_next_block(blocknumber, entry_blocknumber)
delay = None
last_blocknum = self.chain.block_chain_buffer.height()
self.restart_post_block_logic(last_blocknum + 1, delay)
return
def make_st_tx(self, blocknumber):
balance = self.chain.block_chain_buffer.get_stxn_state(blocknumber, self.chain.mining_address)[1]
if balance < config.dev.minimum_staking_balance_required:
logger.warning('Staking not allowed due to insufficient balance')
logger.warning('Balance %s', balance)
return
slave_xmss = self.chain.block_chain_buffer.get_next_slave_xmss(blocknumber)
if not slave_xmss:
return
signing_xmss = self.chain.wallet.address_bundle[0].xmss
finalized_blocknumber = ((blocknumber - 1) // config.dev.blocks_per_epoch) * config.dev.blocks_per_epoch
finalized_block = self.chain.block_chain_buffer.get_block_n(finalized_blocknumber)
if not finalized_block:
logger.warning('Cannot make ST txn, unable to get blocknumber %s', finalized_blocknumber)
return
finalized_headerhash = finalized_block.blockheader.headerhash
st = StakeTransaction.create(
blocknumber=blocknumber,
xmss=signing_xmss,
slavePK=slave_xmss.pk(),
finalized_blocknumber=finalized_blocknumber,
finalized_headerhash=finalized_headerhash,
balance=balance
)
st.sign(signing_xmss)
self.p2pFactory.send_st_to_peers(st)
for num in range(len(self.chain.transaction_pool)):
t = self.chain.transaction_pool[num]
if t.subtype == qrl.core.Transaction_subtypes.TX_SUBTYPE_STAKE and st.hash == t.hash:
if st.get_message_hash() == t.get_message_hash():
return
self.chain.remove_tx_from_pool(t)
break
self.chain.add_tx_to_pool(st)
self.chain.wallet.save_wallet()
def schedule_prepare_winners(self, our_reveal, last_block_number, delay=0):
try:
reactor.prepare_winners.cancel()
except Exception: # No need to log this Exception
pass
reactor.prepare_winners = reactor.callLater(
delay,
self.prepare_winners,
our_reveal=our_reveal,
last_block_number=last_block_number)
def randomize_block_fetch(self, blocknumber):
if self.nodeState.state != NState.syncing or blocknumber <= self.chain.height():
return
if len(list(self.p2pFactory.target_peers.keys())) == 0:
logger.info(' No target peers found.. stopping download')
return
reactor.download_monitor = reactor.callLater(20,
self.randomize_block_fetch, blocknumber)
random_peer = self.p2pFactory.target_peers[random.choice(list(self.p2pFactory.target_peers.keys()))]
random_peer.fetch_block_n(blocknumber)
def randomize_headerhash_fetch(self, block_number):
if self.nodeState.state != NState.forked:
return
if block_number not in fork.pending_blocks or fork.pending_blocks[block_number][1] <= 10: # retry only 11 times
headerhash_monitor = reactor.callLater(15, self.randomize_headerhash_fetch, block_number)
if len(self.p2pFactory.peers) > 0:
try:
if len(self.p2pFactory.fork_target_peers) == 0:
for peer in self.p2pFactory.peers:
self.p2pFactory.fork_target_peers[peer.conn_identity] = peer
if len(self.p2pFactory.fork_target_peers) > 0:
random_peer = self.p2pFactory.fork_target_peers[
random.choice(
list(self.p2pFactory.fork_target_peers.keys())
)
]
count = 0
if block_number in fork.pending_blocks:
count = fork.pending_blocks[block_number][1] + 1
fork.pending_blocks[block_number] = [
random_peer.conn_identity, count, None, headerhash_monitor
]
random_peer.fetch_headerhash_n(block_number)
except Exception as e:
logger.warning('Exception at randomize_headerhash_fetch %s', e)
else:
logger.info('No peers connected.. Will try again... randomize_headerhash_fetch: %s', block_number)
else:
self.update_node_state(NState.unsynced)
def blockheight_map(self):
"""
Blockheight map for connected nodes: when our blockheight looks up to date after a sync or error, we check all connected nodes to ensure everyone is on the same chain/height.
Note: may not return correctly during block propagation.
Once working, alter to identify forks better.
:return:
"""
# i = [block_number, headerhash, self.transport.getPeer().host]
logger.info('blockheight_map:')
logger.info(self.chain.blockheight_map)
# first strip out any laggards..
self.chain.blockheight_map = [q for q in self.chain.blockheight_map if q[0] >= self.chain.m_blockheight()]
result = True
# next identify any node entries which are not exactly correct..
for s in self.chain.blockheight_map:
if s[0] == self.chain.m_blockheight():
if s[1] == self.chain.m_blockchain[-1].blockheader.headerhash:
logger.info(('node: ', s[2], '@', s[0], 'w/:', s[1], 'OK'))
elif s[0] > self.chain.m_blockheight():
logger.info(('warning..', s[2], 'at blockheight', s[0]))
result = False
# wipe it..
del self.chain.blockheight_map[:]
return result
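# The scheduling idiom repeated throughout POS (restart_monitor_bk,
# restart_unsynced_logic, restart_post_block_logic, schedule_peers_blockheight)
# is "cancel any pending call, then reschedule it". A standalone sketch of that
# pattern with a hypothetical ``tick`` callback; it only assumes the twisted
# reactor already imported above.
class _PeriodicCallSketch:
    """Minimal cancel-and-reschedule helper mirroring the POS loops."""

    def __init__(self):
        self._call = None

    def stop(self):
        try:
            self._call.cancel()
        except Exception:  # never scheduled, already fired, or already cancelled
            pass

    def restart(self, delay, tick, *args, **kwargs):
        self.stop()
        self._call = reactor.callLater(delay, tick, *args, **kwargs)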
|
|
import os.path
import os
import keras_code.rnns.sequence.models as models
import matplotlib.pyplot as plt
import numpy as np
import random
import time
import datetime
import argparse
import utilities.paths as paths
DRIVE = paths.get_drive()
def train(model_id, model_path, data_paths_path, feature_path, nb_epoch, batch_size, load_epoch):
start_time = time.clock()
t_la = [[], []]
t_l = [[], []]
t_a = [[], []]
v_l = [[], []]
v_a = [[], []]
fig = None
model = models.get_model_from_id(model_id)
if model is None:
return
model_path = model_path + model_id
# Load log
if not os.path.exists(model_path):
os.makedirs(model_path)
log = open(model_path + '/log.txt', "a")
log.write('\n\n\nTraining initialised: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))
if load_epoch == 0:
print 'Training model from scratch...'
log.write('\nTraining model from scratch...')
else:
if load_epoch is None or load_epoch < 0:  # get latest
for i in range(100, -1, -1):
if os.path.isfile(model_path + '/' + model_id + '_' + str(i) + '.h5'):
load_epoch = i
break
if load_epoch is None:
load_epoch = 0
if load_epoch == 0:
log.write('\nTraining model from scratch...')
else:
print 'Loading past model to train from:'
print model_path + '/' + model_id + '_' + str(load_epoch) + '.h5'
log.write('\nLoading past model to train from:')
log.write('\n' + model_path + '/' + model_id + '_' + str(load_epoch) + '.h5')
[t_l, v_l, v_a] = np.load(model_path + '/training_stats_' + str(load_epoch) + '.npy')
model.load_weights(model_path + '/' + model_id + '_' + str(load_epoch) + '.h5')
model = models.compile_model(model_id, model)
for e in range(load_epoch + 1, nb_epoch+1):
print(
"--------------------------------------------\nepoch %d\n--------------------------------------------" % e)
log.write(
"\n--------------------------------------------\nepoch %d\n--------------------------------------------" % e)
# get data
with open(data_paths_path + 'train_paths_equalised.txt') as f:
all_paths = f.readlines()
random.shuffle(all_paths) # randomise order every epoch!!
all_paths = [line.split() for line in all_paths] # split so x and y split
X_batch = []
Y_batch = []
sum_loss = 0
past = 0
count = 0
inner_count = 0
start_time_inner = time.clock()
for path in all_paths:
count += 1
x, y = models.load_input(model_id, feature_path, path)
X_batch.append(x)
Y_batch.append(y)
if (count % batch_size == 0) or (count == len(all_paths)):
# print 'B'
if count == len(all_paths):
inner_count + 1
Y_batch = np.squeeze(Y_batch)
loss, acc = model.train_on_batch(X_batch, Y_batch)
sum_loss += loss
inner_count += 1
# clear batch
X_batch = []
Y_batch = []
if (int((float(count) / len(all_paths)) * 100) > past) or (count == len(all_paths)):
tr = (len(all_paths) - count) / ((count) / (time.clock() - start_time_inner))
trt = ((nb_epoch - e + 1) * len(all_paths) - count) / (
((e - 1) * len(all_paths) + count) / (time.clock() - start_time))
print '(%d) [%.5f] Image: %d / %d; Epoch TR: %02d:%02d:%02d; Total TR: %02d:%02d:%02d;' % (
past, sum_loss / inner_count, count, len(all_paths), int((tr / 60) / 60), int((tr / 60) % 60),
int(tr % 60),
int((trt / 60) / 60), int((trt / 60) % 60), int(trt % 60))
log.close()
log = open(model_path + '/log.txt', "a")
log.write('\n(%d) [%.5f] Image: %d / %d; Epoch TR: %02d:%02d:%02d; Total TR: %02d:%02d:%02d;' % (
past, sum_loss / inner_count, count, len(all_paths), int((tr / 60) / 60), int((tr / 60) % 60),
int(tr % 60),
int((trt / 60) / 60), int((trt / 60) % 60), int(trt % 60)))
t_l[0].append((e - 1) + past * .01)
t_l[1].append(sum_loss / inner_count)
# graph it
if fig:
plt.close()
fig, ax1 = plt.subplots()
ax1.plot(t_l[0], t_l[1], 'g-')
ax1.plot(v_l[0], v_l[1], 'b-')
ax1.set_ylim(bottom=0)
ax2 = ax1.twinx()
ax2.plot(v_a[0], v_a[1], 'r-')
ax2.set_ylim(top=1)
# plt.plot(t_l[0], t_l[1])
# plt.plot(v_l[0],v_l[1])
# plt.plot(v_a[0],v_a[1])
# plt.show(block=False)
past += 10
sum_loss = 0
inner_count = 0
# if past > 0:
# break
print '--------------------------------------------'
print 'Validation results:'
log.write('\n--------------------------------------------')
log.write('\nValidation results:\n')
with open(data_paths_path + 'val_paths_equalised.txt') as f:
all_val_paths = f.readlines()
random.shuffle(all_val_paths) # randomise order every epoch!!
all_val_paths = [line.split() for line in all_val_paths] # split so x and y split
X_val = []
Y_val = []
count = 0
past = 0
val_metrics = []
for path in all_val_paths:
count += 1
x, y = models.load_input(model_id, feature_path, path)
X_val.append(x)
Y_val.append(y)
if (count % batch_size == 0) or (count == len(all_val_paths)):
# test
Y_val = np.squeeze(Y_val)
val_metrics.append(model.test_on_batch(X_val, Y_val))
# clear batch
X_val = []
Y_val = []
if int((float(count) / len(all_val_paths)) * 100) > past:
print('.'),
log.write('.')
past += 10
print '\n'
val_results = np.average(val_metrics, axis=0)
print val_results
log.write('\n' + str(val_results))
v_l[0].append(e)
v_l[1].append(val_results[0])
v_a[0].append(e)
v_a[1].append(val_results[1])
if e % 1 == 0:
if not os.path.exists(model_path):
os.makedirs(model_path)
model.save_weights(model_path + '/' + model_id + '_' + str(e) + '.h5', overwrite=True)
# fig.savefig(model_path + '/training.png')
fig.savefig(model_path + '/training.pdf')
np.save(model_path + '/training_stats_' + str(e) + '.npy', [t_l, v_l, v_a])
tt = time.clock() - start_time
print 'Total Time Taken: %02d:%02d:%02d;' % (int((tt / 60) / 60), int((tt / 60) % 60), int(tt % 60))
log.write('\n\nTotal Time Taken: %02d:%02d:%02d;' % (int((tt / 60) / 60), int((tt / 60) % 60), int(tt % 60)))
return model
if __name__ == "__main__":
CLUSTER = paths.is_cluster() # AT END OF PROJECT MAYBE FIX SO
if CLUSTER:
p = argparse.ArgumentParser()
p.add_argument('model_id', help='The model ID MV..._01')
p.add_argument('model_path', help='The path the model save location')
p.add_argument('data_paths_path', help='The path to the txt file with list of inputs')
p.add_argument('feature_path', help='The path to the npy ind feature files')
p.add_argument('nb_epoch', type=int, default=20, help='The number of epochs (def: 20)')
p.add_argument('batch_size', type=int, default=16, help='The batch size (def: 16)')
p.add_argument('--load_epoch', help="load a particular saved epoch")
p = p.parse_args()
train(p.model_id, p.model_path, p.data_paths_path, p.feature_path, p.nb_epoch, p.batch_size, p.load_epoch)
else:
model_id = 'MQK_50_04'
model_path = DRIVE + 'MODELS/SEQUENCE/KERAS/'
split_id = 'S010'
data_paths_path = DRIVE + 'DATASETS/VIDEO/TENNIS/SPLITS/'+split_id+'/DIGITS/'
feature_path = DRIVE + 'DATASETS/VIDEO/TENNIS/FEATURES/SPLITS/'+split_id+'/'
nb_epoch = 10
batch_size = 128
load_epoch = 0
train(model_id, model_path, data_paths_path, feature_path, nb_epoch, batch_size, load_epoch)
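# Assumed layout of the *_paths_equalised.txt files read above (inferred from
# the ``line.split()`` handling and the "x and y split" comment, not a
# documented spec): one whitespace-separated sample per line, feature file
# first and label second, e.g.
#
#     clip_00001.npy 0
#     clip_00002.npy 3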
|
|
# Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
from heatclient import exc
from oslo_config import cfg
import yaml
from solum.common import exception
from solum.deployer.handlers import heat as heat_handler
from solum.objects import assembly
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils
STATES = assembly.States
class HandlerTest(base.BaseTestCase):
def setUp(self):
super(HandlerTest, self).setUp()
self.ctx = utils.dummy_context()
def test_create(self):
handler = heat_handler.Handler()
handler.echo = mock.MagicMock()
handler.echo({}, 'foo')
handler.echo.assert_called_once_with({}, 'foo')
@mock.patch('solum.deployer.handlers.heat.tlog')
@mock.patch('solum.deployer.handlers.heat.update_assembly')
@mock.patch('solum.common.catalog.get')
@mock.patch('solum.objects.registry')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('solum.deployer.handlers.heat.get_app_by_assem_id')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_deploy_docker_on_vm_with_dreg(self, mock_ht_cl,
mock_get_app, mock_clients,
mock_registry, mock_get_templ,
mock_ua, m_log):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
m_log.TenantLogger.call.return_value = mock.MagicMock()
mock_registry.Assembly.get_by_id.return_value = fake_assembly
fake_template = self._get_fake_template()
template = self._get_tmpl_for_docker_reg(fake_assembly, fake_template,
'created_image_id')
cfg.CONF.api.image_format = "vm"
cfg.CONF.worker.image_storage = "docker_registry"
mock_get_templ.return_value = template
handler._find_id_if_stack_exists = mock.MagicMock(return_value=(None))
created_stack = {"stack": {
"id": "fake_id",
"links": [{"href": "http://fake.ref",
"rel": "self"}]}}
mock_ht_cl.return_value.stacks.create.return_value = created_stack
handler._check_stack_status = mock.MagicMock()
handler.deploy(self.ctx, 77, 'created_image_id', 'image_name', [80])
self.assertTrue(mock_ht_cl.return_value.stacks.create.called)
assign_and_create_mock = mock_registry.Component.assign_and_create
comp_name = 'Heat_Stack_for_%s' % fake_assembly.name
assign_and_create_mock.assert_called_once_with(self.ctx,
fake_assembly,
comp_name,
'heat_stack',
'Heat Stack test',
'http://fake.ref',
'fake_id')
@mock.patch('solum.deployer.handlers.heat.tlog')
@mock.patch('solum.deployer.handlers.heat.update_assembly')
@mock.patch('solum.common.catalog.get')
@mock.patch('solum.common.catalog.get_from_contrib')
@mock.patch('solum.objects.registry')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_deploy_docker_on_vm_swift(self, heat_clnt, mock_clients,
mock_registry,
mock_contrib, mock_get_templ,
mock_ua, m_log):
handler = heat_handler.Handler()
mock_log = m_log.TenantLogger.return_value.log
fake_assembly = fakes.FakeAssembly()
mock_registry.Assembly.get_by_id.return_value = fake_assembly
fake_template = self._get_fake_template()
img = "http://a.b.c/d?temp_url_sig=v&temp_url_expires=v"
mock_contrib.return_value = "robust_file"
get_file_dict = {}
get_file_dict[self._get_key()] = "robust_file"
cfg.CONF.api.image_format = "vm"
cfg.CONF.worker.image_storage = "swift"
cfg.CONF.deployer.flavor = "flavor"
cfg.CONF.deployer.image = "coreos"
mock_get_templ.return_value = fake_template
handler._find_id_if_stack_exists = mock.MagicMock(return_value=(None))
created_stack = {"stack": {
"id": "fake_id",
"links": [{"href": "http://fake.ref",
"rel": "self"}]}}
heat_clnt.return_value.stacks.create.return_value = created_stack
handler._check_stack_status = mock.MagicMock()
handler.deploy(self.ctx, 77, img, 'tenant-name-ts-commit', [80])
parameters = {'name': fake_assembly.uuid,
'flavor': "flavor",
'image': "coreos",
'key_name': "mykey",
'location': img,
'du': 'tenant-name-ts-commit',
'publish_ports': '-p 80:80'}
heat_clnt.return_value.stacks.create.assert_called_once_with(
stack_name='faker-test_uuid',
template=fake_template,
parameters=parameters,
files=get_file_dict)
assign_and_create_mock = mock_registry.Component.assign_and_create
comp_name = 'Heat_Stack_for_%s' % fake_assembly.name
assign_and_create_mock.assert_called_once_with(self.ctx,
fake_assembly,
comp_name,
'heat_stack',
'Heat Stack test',
'http://fake.ref',
'fake_id')
self.assertTrue(mock_log.called)
@mock.patch('solum.deployer.handlers.heat.tlog')
@mock.patch('solum.deployer.handlers.heat.update_assembly')
@mock.patch('solum.common.catalog.get')
@mock.patch('solum.objects.registry')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_comp_name_error(self, heat_clnt, mock_clients, mock_registry,
mock_get_templ, mock_ua, m_log):
handler = heat_handler.Handler()
mock_log = m_log.TenantLogger.return_value.log
fake_assembly = fakes.FakeAssembly()
mock_registry.Assembly.get_by_id.return_value = fake_assembly
fake_template = json.dumps({'description': 'test'})
mock_get_templ.return_value = fake_template
handler._find_id_if_stack_exists = mock.MagicMock(return_value=(None))
stacks = heat_clnt.return_value.heat.return_value.stacks
stacks.create.return_value = {"stack": {
"id": "fake_id",
"links": [{"href": "http://fake.ref",
"rel": "self"}]}}
handler._check_stack_status = mock.MagicMock()
handler.deploy(self.ctx, 77, 'created_image_id', 'image_name', [80])
assign_and_create_mock = mock_registry.Component.assign_and_create
comp_name = 'Heat Stack for %s' % fake_assembly.name
self.assertRaises(AssertionError,
assign_and_create_mock.assert_called_once_with,
comp_name)
self.assertTrue(mock_log.called)
@mock.patch('solum.deployer.handlers.heat.tlog')
@mock.patch('solum.deployer.handlers.heat.update_assembly')
@mock.patch('solum.common.catalog.get')
@mock.patch('solum.common.catalog.get_from_contrib')
@mock.patch('solum.objects.registry')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_deploy_docker(self, heat_clnt, mock_clients, mock_registry,
mock_get_contrib, mock_get_templ, mock_ua, m_log):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
mock_registry.Assembly.get_by_id.return_value = fake_assembly
m_log.TenantLogger.call.return_value = mock.MagicMock()
cfg.CONF.api.image_format = "docker"
mock_get_contrib.return_value = "robust_file"
get_file_dict = {}
get_file_dict[self._get_key()] = "robust_file"
fake_template = self._get_fake_template()
template = self._get_tmpl_for_docker_reg(fake_assembly, fake_template,
'created_image_id')
mock_get_templ.return_value = template
handler._find_id_if_stack_exists = mock.MagicMock(return_value=(None))
created_stack = {"stack": {
"id": "fake_id",
"links": [{"href": "http://fake.ref",
"rel": "self"}]}}
heat_clnt.return_value.stacks.create.return_value = created_stack
handler._check_stack_status = mock.MagicMock()
handler.deploy(self.ctx, 77, 'created_image_id', 'image_name', [80])
parameters = {'image': 'created_image_id',
'app_name': 'faker',
'port': 80}
heat_clnt.return_value.stacks.create.assert_called_once_with(
stack_name='faker-test_uuid',
template=template,
parameters=parameters,
files=get_file_dict)
assign_and_create_mock = mock_registry.Component.assign_and_create
comp_name = 'Heat_Stack_for_%s' % fake_assembly.name
assign_and_create_mock.assert_called_once_with(self.ctx,
fake_assembly,
comp_name,
'heat_stack',
'Heat Stack test',
'http://fake.ref',
'fake_id')
@mock.patch('solum.deployer.handlers.heat.update_wf_and_app')
@mock.patch('solum.deployer.handlers.heat.update_assembly')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('httplib2.Http')
@mock.patch('solum.common.repo_utils')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_update_assembly_status(self, heat_clnt, mock_repo, mock_http,
mock_clients, mock_ua, mock_wf_app):
cfg.CONF.set_override('wait_interval', 1, group='deployer')
cfg.CONF.set_override('growth_factor', 1, group='deployer')
cfg.CONF.set_override('max_attempts', 1, group='deployer')
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
stack = mock.MagicMock()
stack.status = 'COMPLETE'
heat_clnt.return_value.stacks.get.return_value = stack
resp = {'status': '200'}
conn = mock.MagicMock()
conn.request.return_value = [resp, '']
mock_http.return_value = conn
cfg.CONF.deployer.du_attempts = 1
mock_logger = mock.MagicMock()
handler._parse_server_ip = mock.MagicMock(return_value=('xyz'))
mock_repo.is_reachable.return_value = True
handler._check_stack_status(self.ctx, fake_assembly.id, heat_clnt,
'fake_id', [80], mock_logger)
c1 = mock.call(self.ctx, fake_assembly.id,
{'status': STATES.STARTING_APP,
'application_uri': 'xyz:80'})
c2 = mock.call(self.ctx, fake_assembly.id,
{'status': 'DEPLOYMENT_COMPLETE'})
calls = [c1, c2]
mock_ua.assert_has_calls(calls, any_order=False)
@mock.patch('solum.deployer.handlers.heat.update_wf_and_app')
@mock.patch('solum.deployer.handlers.heat.update_assembly')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('httplib2.Http')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_update_assembly_status_multiple_ports(self, heat_clnt, mock_http,
mock_clients, mock_ua,
mock_wf_app):
cfg.CONF.set_override('wait_interval', 1, group='deployer')
cfg.CONF.set_override('growth_factor', 1, group='deployer')
cfg.CONF.set_override('max_attempts', 1, group='deployer')
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
stack = mock.MagicMock()
stack.status = 'COMPLETE'
heat_clnt.stacks.get.return_value = stack
resp = {'status': '200'}
conn = mock.MagicMock()
conn.request.return_value = [resp, '']
mock_http.return_value = conn
cfg.CONF.deployer.du_attempts = 1
mock_logger = mock.MagicMock()
handler._parse_server_ip = mock.MagicMock(return_value=('xyz'))
handler._check_stack_status(self.ctx, fake_assembly.id,
heat_clnt.return_value,
'fake_id', [80, 81], mock_logger)
c1 = mock.call(self.ctx, fake_assembly.id,
{'status': STATES.STARTING_APP,
'application_uri': 'xyz:[80,81]'})
c2 = mock.call(self.ctx, fake_assembly.id,
{'status': 'DEPLOYMENT_COMPLETE'})
calls = [c1, c2]
mock_ua.assert_has_calls(calls, any_order=False)
@mock.patch('solum.deployer.handlers.heat.update_assembly')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_update_assembly_status_failed(self, heat_clnt,
mock_clients, mock_ua):
cfg.CONF.set_override('wait_interval', 1, group='deployer')
cfg.CONF.set_override('growth_factor', 1, group='deployer')
cfg.CONF.set_override('max_attempts', 1, group='deployer')
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
stack = mock.MagicMock()
stack.status = 'FAILED'
heat_clnt.return_value.stacks.get.return_value = stack
mock_logger = mock.MagicMock()
handler._check_stack_status(self.ctx, fake_assembly.id,
heat_clnt.return_value,
'fake_id', [80], mock_logger)
mock_ua.assert_called_once_with(self.ctx, fake_assembly.id,
{'status':
STATES.ERROR_STACK_CREATE_FAILED})
@mock.patch('solum.deployer.handlers.heat.update_assembly')
def test_get_template(self, mua):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
image_format = 'vm'
image_storage = 'glance'
image_loc = 'abc'
image_name = 'def'
ports = [80]
mock_logger = mock.MagicMock()
template = handler._get_template(self.ctx, image_format,
image_storage, image_loc, image_name,
fake_assembly, ports, mock_logger)
self.assertIsNone(template)
mua.assert_called_once_with(self.ctx,
fake_assembly.id,
{'status': STATES.ERROR})
@mock.patch('solum.deployer.handlers.heat.update_assembly')
def test_get_template_vm_glance(self, mock_update_assembly):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
image_format = 'vm'
image_storage = 'glance'
image_loc = 'abc'
image_name = 'def'
ports = [80]
mock_logger = mock.MagicMock()
template = handler._get_template(self.ctx, image_format,
image_storage, image_loc, image_name,
fake_assembly, ports, mock_logger)
self.assertIsNone(template)
mock_update_assembly.assert_called_once_with(self.ctx,
fake_assembly.id,
{'status': STATES.ERROR})
@mock.patch('solum.common.catalog.get')
def test_get_template_vm_docker_reg(self, mock_catalog_get):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
template_getter = mock.MagicMock()
template_getter.return_value = self._get_fake_template()
handler._get_template_for_docker_reg = template_getter
image_format = 'vm'
image_storage = 'docker_registry'
image_loc = 'abc'
image_name = 'def'
ports = [80]
mock_logger = mock.MagicMock()
template = handler._get_template(self.ctx, image_format,
image_storage, image_loc, image_name,
fake_assembly, ports, mock_logger)
self.assertIsNotNone(template)
self.assertTrue(handler._get_template_for_docker_reg.called)
mock_catalog_get.assert_called_once_with('templates', 'coreos')
@mock.patch('solum.common.catalog.get')
def test_get_template_vm_swift(self, mock_catalog_get):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
mock_logger = mock.MagicMock()
image_format = 'vm'
image_storage = 'swift'
image_loc = 'abc'
image_name = 'def'
ports = [80]
mock_catalog_get.return_value = self._get_fake_template()
template = handler._get_template(self.ctx, image_format,
image_storage, image_loc,
image_name, fake_assembly,
ports, mock_logger)
self.assertEqual(self._get_fake_template(), template)
self.assertEqual(1, mock_catalog_get.call_count)
self.assertEqual(mock.call('templates', 'coreos'),
mock_catalog_get.call_args)
@mock.patch('solum.deployer.handlers.heat.update_assembly')
@mock.patch('solum.common.catalog.get')
def test_get_template_vm_swift_error(self, mock_catalog_get, mock_ua):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
exc_obj = exception.ObjectNotFound()
mock_catalog_get.side_effect = exc_obj
template_getter = mock.MagicMock()
template_getter.return_value = self._get_fake_template()
handler._get_template_for_swift = template_getter
image_format = 'vm'
image_storage = 'swift'
image_loc = 'abc'
image_name = 'def'
ports = [80]
mock_logger = mock.MagicMock()
template = handler._get_template(self.ctx, image_format,
image_storage, image_loc, image_name,
fake_assembly, ports, mock_logger)
self.assertIsNone(template)
mock_ua.assert_called_once_with(self.ctx, fake_assembly.id,
{'status': STATES.ERROR})
assert not handler._get_template_for_swift.called
@mock.patch('solum.deployer.handlers.heat.update_assembly')
@mock.patch('solum.common.catalog.get')
def test_get_template_docker_read_error(self, mock_catalog_get, mock_ua):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
exc_obj = exception.ObjectNotFound()
mock_catalog_get.side_effect = exc_obj
template_getter = mock.MagicMock()
template_getter.return_value = self._get_fake_template()
handler._get_template_for_swift = template_getter
image_format = 'docker'
image_storage = 'swift'
image_loc = 'abc'
image_name = 'def'
ports = [80]
mock_logger = mock.MagicMock()
template = handler._get_template(self.ctx, image_format,
image_storage, image_loc, image_name,
fake_assembly, ports, mock_logger)
self.assertIsNone(template)
mock_ua.assert_called_once_with(self.ctx, fake_assembly.id,
{'status': STATES.ERROR})
@mock.patch('solum.common.heat_utils.get_network_parameters')
@mock.patch('solum.common.clients.OpenStackClients')
def test_get_parameters_for_docker(self, mock_clients, mock_heat_utils):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
image_format = 'docker'
image_loc = 'abc'
image_name = 'abc'
ports = [80]
mock_logger = mock.MagicMock()
params = handler._get_parameters(self.ctx, image_format,
image_loc, image_name, fake_assembly,
ports, mock_clients, mock_logger)
self.assertEqual(fake_assembly.name, params['app_name'])
self.assertEqual('abc', params['image'])
self.assertEqual(80, params['port'])
@mock.patch('solum.common.heat_utils.get_network_parameters')
@mock.patch('solum.common.clients.OpenStackClients')
def test_get_parameters_for_vm(self, mock_clients, mock_heat_utils):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
image_format = 'vm'
image_loc = 'abc'
image_name = 'abc'
ports = [80]
mock_logger = mock.MagicMock()
cfg.CONF.set_override('flavor', 'abc', group='deployer')
cfg.CONF.set_override('image', 'def', group='deployer')
params = handler._get_parameters(self.ctx, image_format,
image_loc, image_name, fake_assembly,
ports, mock_clients, mock_logger)
self.assertEqual(str(fake_assembly.uuid), params['name'])
self.assertEqual('abc', params['flavor'])
self.assertEqual('def', params['image'])
self.assertIsNone(params.get('port'))
@mock.patch('solum.deployer.handlers.heat.update_assembly')
@mock.patch('solum.common.heat_utils.get_network_parameters')
@mock.patch('solum.common.clients.OpenStackClients')
def test_get_parameters_for_unrecognized_img_format(self, mock_clients,
mock_heat_utils,
mock_ua):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
image_format = 'abc'
image_loc = 'abc'
image_name = 'abc'
ports = [80]
mock_logger = mock.MagicMock()
params = handler._get_parameters(self.ctx, image_format,
image_loc, image_name, fake_assembly,
ports, mock_clients, mock_logger)
self.assertIsNone(params)
mock_ua.assert_called_once_with(self.ctx, fake_assembly.id,
{'status':
STATES.ERROR})
@mock.patch('solum.deployer.handlers.heat.update_assembly')
def test_check_stack_status(self, mock_ua):
handler = heat_handler.Handler()
fake_assembly = fakes.FakeAssembly()
mock_client = mock.MagicMock()
mock_client.stacks.get.side_effect = Exception()
cfg.CONF.set_override('wait_interval', 1, group='deployer')
cfg.CONF.set_override('growth_factor', 1, group='deployer')
cfg.CONF.set_override('max_attempts', 1, group='deployer')
mock_logger = mock.MagicMock()
handler._check_stack_status(self.ctx, fake_assembly.id, mock_client,
'fake_id', [80], mock_logger)
mock_ua.assert_called_once_with(self.ctx, fake_assembly.id,
{'status':
STATES.ERROR_STACK_CREATE_FAILED})
def test_parse_server_ip(self):
handler = heat_handler.Handler()
heat_output = mock.MagicMock()
heat_output._info = {"id": "fake_id",
"outputs": [{"output_value": "192.168.78.21",
"description": "IP", "output_key":
"public_ip"},
{"output_value":
"http://192.168.78.21:5000",
"description": "URL", "output_key":
"URL"}]}
host_url = handler._parse_server_ip(heat_output)
self.assertEqual("192.168.78.21", host_url)
def test_find_id_if_stack_exists(self):
handler = heat_handler.Handler()
assem = mock.MagicMock()
assem.heat_stack_component = mock.MagicMock()
assem.heat_stack_component.heat_stack_id = '123'
stack_id = handler._find_id_if_stack_exists(assem)
self.assertEqual('123', stack_id)
@mock.patch('solum.common.solum_swiftclient.SwiftClient.delete_object')
@mock.patch('solum.objects.registry')
@mock.patch('solum.deployer.handlers.heat.tlog')
@mock.patch('solum.api.handlers.userlog_handler.UserlogHandler')
def test_delete_app_artifacts_from_swift(self, mock_log_handler, m_log,
mock_registry, mock_swift_delete):
fake_assembly = fakes.FakeAssembly()
fake_image = fakes.FakeImage()
mock_registry.Image.get_by_id.return_value = fake_image
handler = heat_handler.Handler()
handler._delete_app_artifacts_from_swift(self.ctx, mock_log_handler,
fake_assembly)
mock_registry.Image.get_by_id.assert_called_once_with(
mock.ANY, fake_assembly.image_id)
docker_image_name = fake_image.docker_image_name
img_filename = docker_image_name.split('-', 1)[1]
mock_swift_delete.assert_called_once_with('solum_du', img_filename)
@mock.patch('solum.common.solum_swiftclient.SwiftClient.delete_object')
@mock.patch('solum.objects.registry')
@mock.patch('solum.deployer.handlers.heat.tlog')
@mock.patch('solum.api.handlers.userlog_handler.UserlogHandler')
def test_delete_app_artifacts_from_swift_no_image(self, mock_log_handler,
m_log, mock_registry,
mock_swift_delete):
fake_assembly = fakes.FakeAssembly()
fake_assembly.image_id = None
handler = heat_handler.Handler()
handler._delete_app_artifacts_from_swift(self.ctx, mock_log_handler,
fake_assembly)
self.assertFalse(mock_registry.Image.get_by_id.called)
self.assertFalse(mock_swift_delete.called)
@mock.patch('solum.common.solum_swiftclient.SwiftClient.delete_object')
@mock.patch('solum.api.handlers.userlog_handler.UserlogHandler')
@mock.patch('solum.deployer.handlers.heat.tlog')
@mock.patch('solum.objects.registry')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_destroy_success(self, heat_clnt, mock_client, mock_registry,
m_log, mock_log_handler, mock_swift_delete):
cfg.CONF.set_override('image_storage', 'swift', group='worker')
fake_assem = fakes.FakeAssembly()
mock_registry.Assembly.get_by_id.return_value = fake_assem
fake_image = fakes.FakeImage()
mock_registry.Image.get_by_id.return_value = fake_image
handler = heat_handler.Handler()
mock_log = m_log.TenantLogger.return_value.log
mock_del = heat_clnt.return_value.stacks.delete
mock_del.side_effect = exc.HTTPNotFound
cfg.CONF.deployer.max_attempts = 1
cfg.CONF.deployer.wait_interval = 0
cfg.CONF.deployer.growth_factor = 1.2
with mock.patch.object(handler, "_find_id_if_stack_exists",
return_value=42):
handler.destroy_assembly(self.ctx, fake_assem.id)
self.assertTrue(mock_del.called)
mock_registry.Image.get_by_id.assert_called_once_with(
mock.ANY, fake_assem.image_id)
docker_image_name = fake_image.docker_image_name
img_filename = docker_image_name.split('-', 1)[1]
mock_swift_delete.assert_called_once_with('solum_du', img_filename)
log_handler = mock_log_handler.return_value
log_handler.delete.assert_called_once_with(fake_assem.uuid)
self.assertTrue(mock_log.called)
@mock.patch('solum.common.solum_swiftclient.SwiftClient.delete_object')
@mock.patch('solum.api.handlers.userlog_handler.UserlogHandler')
@mock.patch('solum.deployer.handlers.heat.tlog')
@mock.patch('solum.objects.registry')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_destroy_stack_not_found(self, heat_clnt, mock_client,
mock_registry, m_log,
mock_log_handler, mock_swift_delete):
cfg.CONF.set_override('image_storage', 'swift', group='worker')
fake_assem = fakes.FakeAssembly()
mock_registry.Assembly.get_by_id.return_value = fake_assem
fake_image = fakes.FakeImage()
mock_registry.Image.get_by_id.return_value = fake_image
handler = heat_handler.Handler()
mock_log = m_log.TenantLogger.return_value.log
mock_del = heat_clnt.return_value.stacks.delete
mock_del.side_effect = exc.HTTPNotFound
cfg.CONF.deployer.max_attempts = 1
cfg.CONF.deployer.wait_interval = 0
cfg.CONF.deployer.growth_factor = 1.2
with mock.patch.object(handler, "_find_id_if_stack_exists",
return_value=42) as mock_find:
handler.destroy_assembly(self.ctx, fake_assem.id)
self.assertTrue(mock_find.called)
self.assertTrue(mock_del.called)
mock_registry.Image.get_by_id.assert_called_once_with(
mock.ANY, fake_assem.image_id)
docker_image_name = fake_image.docker_image_name
img_filename = docker_image_name.split('-', 1)[1]
mock_swift_delete.assert_called_once_with('solum_du', img_filename)
log_handler = mock_log_handler.return_value
log_handler.delete.assert_called_once_with(fake_assem.uuid)
self.assertTrue(mock_log.called)
@mock.patch('solum.deployer.handlers.heat.tlog')
@mock.patch('solum.deployer.handlers.heat.update_assembly')
@mock.patch('solum.objects.registry')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_destroy_error(self, heat_clnt, mock_client,
mock_registry, mua, m_log):
fake_assem = fakes.FakeAssembly()
mock_registry.Assembly.get_by_id.return_value = fake_assem
mock_del = heat_clnt.return_value.stacks.delete
handler = heat_handler.Handler()
handler._find_id_if_stack_exists = mock.MagicMock(return_value='42')
cfg.CONF.deployer.max_attempts = 1
cfg.CONF.deployer.wait_interval = 0
cfg.CONF.deployer.growth_factor = 1.2
with mock.patch.object(handler, "_find_id_if_stack_exists",
return_value="42"):
handler.destroy_assembly(self.ctx, fake_assem.id)
c1 = mock.call(self.ctx, fake_assem.id,
{'status': STATES.DELETING})
c2 = mock.call(self.ctx, fake_assem.id,
{'status': STATES.ERROR_STACK_DELETE_FAILED})
calls = [c1, c2]
mua.assert_has_calls(calls, any_order=False)
self.assertTrue(mock_del.called)
@mock.patch('solum.common.solum_swiftclient.SwiftClient.delete_object')
@mock.patch('solum.api.handlers.userlog_handler.UserlogHandler')
@mock.patch('solum.deployer.handlers.heat.tlog')
@mock.patch('solum.objects.registry')
@mock.patch('solum.common.clients.OpenStackClients')
def test_destroy_absent_no_wf(self, mock_client, mock_registry,
mock_tlogger, mock_log_handler,
mock_swift_delete):
cfg.CONF.worker.image_storage = "swift"
fake_assem = fakes.FakeAssembly()
mock_registry.Assembly.get_by_id.return_value = fake_assem
fake_image = fakes.FakeImage()
mock_registry.Image.get_by_id.return_value = fake_image
nfe = exception.ResourceNotFound(name=fake_assem.name,
id=fake_assem.id)
mock_registry.Workflow.get_by_assembly_id.side_effect = nfe
mock_log = mock_tlogger.TenantLogger.return_value.log
mock_upload = mock_tlogger.TenantLogger.return_value.upload
mock_log_del = mock_log_handler.return_value.delete
mock_heat = mock_client.return_value.heat
mock_del = mock_heat.return_value.stacks.delete
hh = heat_handler.Handler()
with mock.patch.object(hh, "_find_id_if_stack_exists",
return_value=None):
hh.destroy_assembly(self.ctx, fake_assem.id)
self.assertFalse(mock_del.called)
self.assertTrue(fake_assem.destroy.called)
mock_registry.Image.get_by_id.assert_called_once_with(
mock.ANY, fake_assem.image_id)
docker_image_name = fake_image.docker_image_name
img_filename = docker_image_name.split('-', 1)[1]
mock_swift_delete.assert_called_once_with('solum_du', img_filename)
self.assertTrue(mock_log.called)
self.assertTrue(mock_upload.called)
self.assertEqual(1, mock_log_del.call_count)
self.assertEqual(mock.call(fake_assem.uuid), mock_log_del.call_args)
@mock.patch('solum.common.solum_swiftclient.SwiftClient.delete_object')
@mock.patch('solum.api.handlers.userlog_handler.UserlogHandler')
@mock.patch('solum.deployer.handlers.heat.tlog')
@mock.patch('solum.objects.registry')
@mock.patch('solum.common.clients.OpenStackClients')
def test_destroy_absent_wf_present(self, mock_client, mock_registry,
mock_tlogger, mock_log_handler,
mock_swift_delete):
cfg.CONF.worker.image_storage = "swift"
fake_assem = fakes.FakeAssembly()
mock_registry.Assembly.get_by_id.return_value = fake_assem
fake_image = fakes.FakeImage()
mock_registry.Image.get_by_id.return_value = fake_image
fake_wf = fakes.FakeWorkflow()
mock_registry.Workflow.get_by_assembly_id.return_value = fake_wf
mock_log = mock_tlogger.TenantLogger.return_value.log
mock_upload = mock_tlogger.TenantLogger.return_value.upload
mock_log_del = mock_log_handler.return_value.delete
mock_heat = mock_client.return_value.heat
mock_del = mock_heat.return_value.stacks.delete
hh = heat_handler.Handler()
with mock.patch.object(hh, "_find_id_if_stack_exists",
return_value=None):
hh.destroy_assembly(self.ctx, fake_assem.id)
self.assertFalse(mock_del.called)
self.assertFalse(fake_assem.destroy.called)
mock_registry.Image.get_by_id.assert_called_once_with(
mock.ANY, fake_assem.image_id)
docker_image_name = fake_image.docker_image_name
img_filename = docker_image_name.split('-', 1)[1]
mock_swift_delete.assert_called_once_with('solum_du', img_filename)
self.assertTrue(mock_log.called)
self.assertTrue(mock_upload.called)
self.assertEqual(1, mock_log_del.call_count)
self.assertEqual(mock.call(fake_assem.uuid), mock_log_del.call_args)
@mock.patch('solum.objects.registry')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_successful_deploy_destroys_twins(self, heat_clnt,
mock_client, mr):
handler = heat_handler.Handler()
old_app = fakes.FakeAssembly()
old_app.name = 'old app'
old_app.status = 'DEPLOYMENT_COMPLETE'
new_app = fakes.FakeAssembly()
new_app.id = 9
new_app.plan_id = old_app.plan_id
new_app.name = 'new app'
new_app.status = 'DEPLOYMENT_COMPLETE'
cfg.CONF.set_override('wait_interval', 0, group='deployer')
cfg.CONF.set_override('growth_factor', 0, group='deployer')
cfg.CONF.set_override('max_attempts', 1, group='deployer')
self.assertEqual(old_app.plan_id, new_app.plan_id)
self.assertEqual(old_app.plan_uuid, new_app.plan_uuid)
mr.AssemblyList.get_earlier.return_value = [old_app]
mock_st_del = heat_clnt.return_value.stacks.delete
mock_st_get = heat_clnt.return_value.stacks.get
handler.destroy_assembly = mock.MagicMock()
handler._destroy_other_assemblies(self.ctx, new_app.id,
heat_clnt.return_value)
self.assertTrue(mock_st_del.called)
self.assertTrue(mock_st_get.called)
@mock.patch('solum.objects.registry')
@mock.patch('solum.common.clients.OpenStackClients')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_successful_deploy_preserves_others(self, heat_clnt,
mock_client, mr):
handler = heat_handler.Handler()
old_app = fakes.FakeAssembly()
old_app.name = 'old app'
old_app.plan_id = 1
old_app.id = 1
old_app.status = 'DEPLOYMENT_COMPLETE'
new_app = fakes.FakeAssembly()
new_app.id = 1
new_app.plan_id = 2
new_app.plan_uuid = 'new fake plan uuid'
new_app.name = 'new app'
new_app.status = 'DEPLOYMENT_COMPLETE'
cfg.CONF.set_override('wait_interval', 0, group='deployer')
cfg.CONF.set_override('growth_factor', 0, group='deployer')
cfg.CONF.set_override('max_attempts', 1, group='deployer')
self.assertNotEqual(old_app.plan_id, new_app.plan_id)
self.assertNotEqual(old_app.plan_uuid, new_app.plan_uuid)
mr.AssemblyList.get_earlier.return_value = [old_app]
mr.Assembly.get_by_id.return_value = new_app
mock_heat = mock_client.return_value.heat
mock_st_del = mock_heat.return_value.stacks.delete
mock_st_get = mock_heat.return_value.stacks.get
handler.destroy_assembly = mock.MagicMock()
handler._destroy_other_assemblies(self.ctx, new_app.id, heat_clnt)
self.assertFalse(mock_st_del.called)
self.assertFalse(mock_st_get.called)
@mock.patch('solum.objects.registry')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_successful_deploy_preserves_notreadies(self, heat_clnt, mr):
handler = heat_handler.Handler()
old_app = fakes.FakeAssembly()
old_app.name = 'old app'
old_app.status = 'BUILDING'
new_app = fakes.FakeAssembly()
new_app.id = 9
new_app.plan_id = old_app.plan_id
new_app.name = 'new app'
new_app.status = 'DEPLOYMENT_COMPLETE'
self.assertEqual(old_app.plan_id, new_app.plan_id)
self.assertEqual(old_app.plan_uuid, new_app.plan_uuid)
mr.AssemblyList.get_earlier.return_value = []
handler.destroy_assembly = mock.MagicMock()
handler._destroy_other_assemblies(self.ctx, new_app.id, heat_clnt)
self.assertEqual(0, handler.destroy_assembly.call_count)
@mock.patch('solum.objects.registry')
@mock.patch('solum.deployer.handlers.heat.get_heat_client')
def test_unsuccessful_deploy_preserves_everyone(self, heat_clnt, mr):
handler = heat_handler.Handler()
old_app = fakes.FakeAssembly()
old_app.name = 'old app'
old_app.status = 'DEPLOYMENT_COMPLETE'
new_app = fakes.FakeAssembly()
new_app.id = 9
new_app.plan_id = old_app.plan_id
new_app.name = 'new app'
new_app.status = 'ERROR'
self.assertEqual(old_app.plan_id, new_app.plan_id)
self.assertEqual(old_app.plan_uuid, new_app.plan_uuid)
mr.AssemblyList.get_earlier.return_value = []
handler.destroy_assembly = mock.MagicMock()
handler._destroy_other_assemblies(self.ctx, new_app.id,
heat_clnt.return_value)
self.assertEqual(0, handler.destroy_assembly.call_count)
def _get_key(self):
return "robust-du-handling.sh"
def _get_fake_template(self):
t = "description: test\n"
t += "resources:\n"
t += " compute_instance:\n"
t += " properties:\n"
t += " user_data:\n"
t += " str_replace:\n"
t += " {template:"
t += " #!/bin/bash -x\n"
t += " #Invoke the container\n"
t += "}\n"
return t
def _get_tmpl_for_docker_reg(self, assem, template, image_id):
template_bdy = yaml.safe_load(template)
run_docker = ('#!/bin/bash -x\n'
'# Invoke the container\n'
'docker run -p 80:80 -d {img}\n'
'wc_notify --data-binary {stat}')
run_docker = run_docker.format(img=image_id,
stat='\'{"status": "SUCCESS"}\'')
comp_instance = template_bdy['resources']['compute_instance']
user_data = comp_instance['properties']['user_data']
user_data['str_replace']['template'] = run_docker
comp_instance['properties']['user_data'] = user_data
template_bdy['resources']['compute_instance'] = comp_instance
template = yaml.dump(template_bdy)
return template
def _get_tmpl_for_swift(self, assem, template, image_loc, image_name):
template_bdy = yaml.safe_load(template)
image_tar_location = image_loc
run_docker = ('#!/bin/bash -x\n'
'# Invoke the container\n'
'wget \"{image_tar_location}\" --output-document={du}\n'
'docker load < {du}\n'
'docker run -p 80:80 -d {du}\n'
'wc_notify --data-binary {stat}')
run_docker = run_docker.format(image_tar_location=image_tar_location,
du=image_name,
stat='\'{"status": "SUCCESS"}\'')
comp_instance = template_bdy['resources']['compute_instance']
user_data = comp_instance['properties']['user_data']
user_data['str_replace']['template'] = run_docker
comp_instance['properties']['user_data'] = user_data
template_bdy['resources']['compute_instance'] = comp_instance
template = yaml.safe_dump(template_bdy,
encoding='utf-8',
allow_unicode=True)
return template
|
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing the Coordinate search."""
import os
from string import Template
import sys
from search.common import exceptions
from search.common import geconstants
from search.common import utils
from search.plugin import coordinate_transform
class CoordinateSearch(object):
"""Class for performing the Coordinate search.
Coordinate search supports the following formats:
1. Decimal Degrees (DD)
2. Degrees Minutes Seconds (DMS)
3. Degrees Decimal Minutes (DDM)
4. Military Grid Reference System (MGRS)
5. Universal Transverse Mercator (UTM)
Coordinate search transforms coordinates from DMS, DDM, UTM, MGRS formats to
DD, validates the coordinates and sends the response back to the client.
Depending on the client type, KML or JSONP formats are supported.
"""
NUM_OF_COORDS_IN_LAT_LNG_FORMAT = 2
NUM_OF_COORDS_IN_MGRS_FORMAT = 1
def __init__(self):
"""Inits CoordinateSearch.
Initializes the logger "ge_search".
Initializes the KML and placemark templates used for KML/JSONP outputs.
"""
self.utils = utils.SearchUtils()
self._transform = coordinate_transform.CoordinateTransform()
configs = self.utils.GetConfigs(
os.path.join(geconstants.SEARCH_CONFIGS_DIR, "CoordinateSearch.conf"))
self._jsonp_call = self.utils.jsonp_functioncall
self._geom = """
<name>%s</name>
<styleUrl>%s</styleUrl>
<Point>
<coordinates>%s,%s</coordinates>
</Point>\
"""
self._json_geom = """
{
"Point": {
"coordinates": "%s,%s"
}
}
"""
self._kml = """
<kml xmlns="http://www.opengis.net/kml/2.2"
xmlns:gx="http://www.google.com/kml/ext/2.2"
xmlns:kml="http://www.opengis.net/kml/2.2"
xmlns:atom="http://www.w3.org/2005/Atom">
<Folder>
<name>Coordinate Search Results</name>
<open>1</open>
<Style id="placemark_label">\
${style}
</Style>\
${placemark}
</Folder>
</kml>
"""
self._kml_template = Template(self._kml)
self._placemark_template = self.utils.placemark_template
self._json_template = self.utils.json_template
self._json_placemark_template = self.utils.json_placemark_template
style_template = self.utils.style_template
self.coordinates_in_lat_lng_format_ = ["DD", "DMS", "DDM"]
self.logger = self.utils.logger
self._style = style_template.substitute(
balloonBgColor=configs.get("balloonstyle.bgcolor"),
balloonTextColor=configs.get("balloonstyle.textcolor"),
balloonText=configs.get("balloonstyle.text"),
iconStyleScale=configs.get("iconstyle.scale"),
iconStyleHref=configs.get("iconstyle.href"),
lineStyleColor=configs.get("linestyle.color"),
lineStyleWidth=configs.get("linestyle.width"),
polyStyleColor=configs.get("polystyle.color"),
polyStyleColorMode=configs.get("polystyle.colormode"),
polyStyleFill=configs.get("polystyle.fill"),
polyStyleOutline=configs.get("polystyle.outline"),
listStyleHref=configs.get("iconstyle.href"))
def HandleSearchRequest(self, environ):
"""Fetches the search tokens from form and performs the coordinate search.
Args:
environ: A dictionary of environment variables as supplied by the
WSGI interface to the coordinate search application.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
Raises:
BadQueryException: if the search query is invalid.
"""
search_results = ""
# Fetch all the attributes provided by the user.
parameters = self.utils.GetParameters(environ)
response_type = self.utils.GetResponseType(environ)
# Retrieve the callback function name for the JSONP response.
self.f_callback = self.utils.GetCallback(parameters)
original_query = self.utils.GetValue(parameters, "q")
if not original_query:
msg = "Empty search query received."
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
search_status, search_results = self.DoSearch(original_query, response_type)
if not search_status:
folder_name = "Search returned no results."
search_results = self.utils.NoSearchResults(
folder_name, self._style, response_type, self.f_callback)
return (search_results, response_type)
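# A minimal WSGI-style invocation sketch (hypothetical environ contents; the
# parameter and response-type extraction is delegated to utils.SearchUtils):
#
#   environ = {"QUERY_STRING": "q=37.421,-122.084"}
#   results, response_type = CoordinateSearch().HandleSearchRequest(environ)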
def DoSearch(self, search_query, response_type):
"""Performs the coordinate search.
Args:
search_query: A string containing the search coordinates as
entered by the user.
response_type: Response type can be KML or JSONP, depending on the client.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
Raises:
BadQueryException: if the search query is invalid.
"""
coordinate_type = ""
search_results = ""
input_coordinates = []
decimal_degrees_coordinates = []
search_tokens = self.utils.SearchTokensFromString(search_query)
self.logger.debug("coordinates: %s", ",".join(search_tokens))
input_coordinates = self._transform.GetInputCoordinates(
",".join(search_tokens))
number_of_coordinates = len(input_coordinates)
if number_of_coordinates == 0:
msg = "Incomplete search query %s submitted" % search_query
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
coordinate_type = self._transform.GetInputType(input_coordinates)
self.logger.debug("Coordinate type is %s.", coordinate_type)
if coordinate_type in self.coordinates_in_lat_lng_format_:
reqd_num_of_coordinates = CoordinateSearch.NUM_OF_COORDS_IN_LAT_LNG_FORMAT
else:
reqd_num_of_coordinates = CoordinateSearch.NUM_OF_COORDS_IN_MGRS_FORMAT
if number_of_coordinates > reqd_num_of_coordinates:
self.logger.warning(
"extra search parameters ignored: %s", ",".join(
input_coordinates[reqd_num_of_coordinates:]))
input_coordinates = input_coordinates[:reqd_num_of_coordinates]
elif number_of_coordinates < reqd_num_of_coordinates:
msg = "Incomplete search query %s submitted" % search_query
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
decimal_degrees_coordinates = self._transform.TransformToDecimalDegrees(
coordinate_type, input_coordinates)
search_results = self.ConstructResponse(
response_type, decimal_degrees_coordinates)
search_status = True if search_results else False
return search_status, search_results
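# A direct-call sketch for DoSearch (hypothetical coordinate string), assuming
# the CoordinateSearch.conf referenced in __init__ is readable:
#
#   searcher = CoordinateSearch()
#   status, results = searcher.DoSearch("37.421, -122.084", "KML")
#   # status is True when a response was built; results is the KML string.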
def ConstructKMLResponse(self, latitude, longitude):
"""Prepares KML response.
KML response has the below format:
<kml>
<Folder>
<name/>
<StyleURL>
---
</StyleURL>
<Point>
<coordinates/>
</Point>
</Folder>
</kml>
Args:
latitude: latitude in Decimal Degrees format.
longitude: longitude in Decimal Degrees format.
Returns:
kml_response: KML formatted response.
"""
placemark = ""
kml_response = ""
name = "%s, %s" % (latitude, longitude)
style_url = "#placemark_label"
geom = self._geom % (name, style_url, str(longitude), str(latitude))
placemark = self._placemark_template.substitute(geom=geom)
kml_response = self._kml_template.substitute(
style=self._style, placemark=placemark)
self.logger.info("KML response successfully formatted")
return kml_response
def ConstructJSONPResponse(self, latitude, longitude):
"""Prepares JSONP response.
{
"Folder": {
"name": "X,Y",
"Style": {
"IconStyle": {"scale": "1" },
"LineStyle": {
"color": "7fffff00",
"width": "5" },
"PolyStyle": {
"color": "7f66ffff",
"fill": "1",
"outline": "1" } },
"Placemark": {
"Point": {
"coordinates": "X,Y" } }
}
}
Args:
latitude: latitude in Decimal Degrees format.
longitude: longitude in Decimal Degrees format.
Returns:
jsonp_response: JSONP formatted response.
"""
placemark = ""
json_response = ""
jsonp_response = ""
folder_name = "%s, %s" % (latitude, longitude)
json_geom = self._json_geom % (latitude, longitude)
placemark = self._json_placemark_template.substitute(
geom=json_geom)
json_response = self._json_template.substitute(
foldername=folder_name, json_placemark=placemark)
# Escape single quotes from json_response.
json_response = json_response.replace("'", "\\'")
jsonp_response = self._jsonp_call % (self.f_callback, json_response)
self.logger.info("JSONP response successfully formatted")
return jsonp_response
def ConstructResponse(self, response_type, decimal_degrees_coordinates):
"""Construct the response based on response_type.
Args:
response_type: Response type can be KML or JSONP, depending on the client.
decimal_degrees_coordinates: List of coordinates in DD(Decimal Degrees)
format.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
"""
search_results = ""
assert response_type in self.utils.output_formats, (
self.logger.error("Invalid response type %s", response_type))
if response_type == "KML":
search_results = self.ConstructKMLResponse(
decimal_degrees_coordinates[0], decimal_degrees_coordinates[1])
elif response_type == "JSONP":
search_results = self.ConstructJSONPResponse(
decimal_degrees_coordinates[0], decimal_degrees_coordinates[1])
return search_results
def main(coords, response_type):
gepobj = CoordinateSearch()
gepobj.DoSearch(coords, response_type)
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
|
|
from dbt.utils import filter_null_values, deep_merge, classproperty
from dbt.node_types import NodeType
import dbt.exceptions
from collections.abc import Mapping, Hashable
from dataclasses import dataclass, fields
from typing import (
Optional, TypeVar, Generic, Any, Type, Dict, Union, Iterator, Tuple,
Set
)
from typing_extensions import Protocol
from hologram import JsonSchemaMixin
from hologram.helpers import StrEnum
from dbt.contracts.util import Replaceable
from dbt.contracts.graph.compiled import CompiledNode
from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedNode
from dbt.exceptions import InternalException
from dbt import deprecations
class RelationType(StrEnum):
Table = 'table'
View = 'view'
CTE = 'cte'
MaterializedView = 'materializedview'
External = 'external'
class ComponentName(StrEnum):
Database = 'database'
Schema = 'schema'
Identifier = 'identifier'
class HasQuoting(Protocol):
quoting: Dict[str, bool]
class FakeAPIObject(JsonSchemaMixin, Replaceable, Mapping):
# override the mapping truthiness, len is always >1
def __bool__(self):
return True
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key) from None
def __iter__(self):
deprecations.warn('not-a-dictionary', obj=self)
for _, name in self._get_fields():
yield name
def __len__(self):
deprecations.warn('not-a-dictionary', obj=self)
return len(fields(self.__class__))
def incorporate(self, **kwargs):
value = self.to_dict()
value = deep_merge(value, kwargs)
return self.from_dict(value)
T = TypeVar('T')
@dataclass
class _ComponentObject(FakeAPIObject, Generic[T]):
database: T
schema: T
identifier: T
def get_part(self, key: ComponentName) -> T:
if key == ComponentName.Database:
return self.database
elif key == ComponentName.Schema:
return self.schema
elif key == ComponentName.Identifier:
return self.identifier
else:
raise ValueError(
'Got a key of {}, expected one of {}'
.format(key, list(ComponentName))
)
def replace_dict(self, dct: Dict[ComponentName, T]):
kwargs: Dict[str, T] = {}
for k, v in dct.items():
kwargs[str(k)] = v
return self.replace(**kwargs)
@dataclass
class Policy(_ComponentObject[bool]):
database: bool = True
schema: bool = True
identifier: bool = True
@dataclass
class Path(_ComponentObject[Optional[str]]):
database: Optional[str]
schema: Optional[str]
identifier: Optional[str]
def __post_init__(self):
# handle pesky jinja2.Undefined sneaking in here and messing up render
if not isinstance(self.database, (type(None), str)):
raise dbt.exceptions.CompilationException(
'Got an invalid path database: {}'.format(self.database)
)
if not isinstance(self.schema, (type(None), str)):
raise dbt.exceptions.CompilationException(
'Got an invalid path schema: {}'.format(self.schema)
)
if not isinstance(self.identifier, (type(None), str)):
raise dbt.exceptions.CompilationException(
'Got an invalid path identifier: {}'.format(self.identifier)
)
def get_lowered_part(self, key: ComponentName) -> Optional[str]:
part = self.get_part(key)
if part is not None:
part = part.lower()
return part
Self = TypeVar('Self', bound='BaseRelation')
@dataclass(frozen=True, eq=False, repr=False)
class BaseRelation(FakeAPIObject, Hashable):
type: Optional[RelationType]
path: Path
quote_character: str = '"'
include_policy: Policy = Policy()
quote_policy: Policy = Policy()
dbt_created: bool = False
def _is_exactish_match(self, field: ComponentName, value: str) -> bool:
if self.dbt_created and self.quote_policy.get_part(field) is False:
return self.path.get_lowered_part(field) == value.lower()
else:
return self.path.get_part(field) == value
@classmethod
def _get_field_named(cls, field_name):
for field, _ in cls._get_fields():
if field.name == field_name:
return field
# this should be unreachable
raise ValueError(f'BaseRelation has no {field_name} field!')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.to_dict() == other.to_dict()
@classmethod
def get_default_quote_policy(cls: Type[Self]) -> Policy:
return cls._get_field_named('quote_policy').default
def get(self, key, default=None):
"""Override `.get` to return a metadata object so we don't break
dbt_utils.
"""
if key == 'metadata':
return {
'type': self.__class__.__name__
}
return super().get(key, default)
def matches(
self,
database: Optional[str] = None,
schema: Optional[str] = None,
identifier: Optional[str] = None,
) -> bool:
search = filter_null_values({
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier
})
if not search:
# nothing was passed in
raise dbt.exceptions.RuntimeException(
"Tried to match relation, but no search path was passed!")
exact_match = True
approximate_match = True
for k, v in search.items():
if not self._is_exactish_match(k, v):
exact_match = False
if self.path.get_lowered_part(k) != v.lower():
approximate_match = False
if approximate_match and not exact_match:
target = self.create(
database=database, schema=schema, identifier=identifier
)
dbt.exceptions.approximate_relation_match(target, self)
return exact_match
def replace_path(self, **kwargs):
return self.replace(path=self.path.replace(**kwargs))
def quote(
self: Self,
database: Optional[bool] = None,
schema: Optional[bool] = None,
identifier: Optional[bool] = None,
) -> Self:
policy = filter_null_values({
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier
})
new_quote_policy = self.quote_policy.replace_dict(policy)
return self.replace(quote_policy=new_quote_policy)
def include(
self: Self,
database: Optional[bool] = None,
schema: Optional[bool] = None,
identifier: Optional[bool] = None,
) -> Self:
policy = filter_null_values({
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier
})
new_include_policy = self.include_policy.replace_dict(policy)
return self.replace(include_policy=new_include_policy)
def information_schema(self, view_name=None) -> 'InformationSchema':
# some of our data comes from jinja, where things can be `Undefined`.
if not isinstance(view_name, str):
view_name = None
# Kick the user-supplied schema out of the information schema relation
# Instead address this as <database>.information_schema by default
info_schema = InformationSchema.from_relation(self, view_name)
return info_schema.incorporate(path={"schema": None})
def information_schema_only(self) -> 'InformationSchema':
return self.information_schema()
def without_identifier(self) -> 'BaseRelation':
"""Return a form of this relation that only has the database and schema
set to included. To get the appropriately-quoted form the schema out of
the result (for use as part of a query), use `.render()`. To get the
raw database or schema name, use `.database` or `.schema`.
The hash of the returned object is the result of render().
"""
return self.include(identifier=False).replace_path(identifier=None)
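# For illustration (a sketch based on the default policies above, not on any
# particular adapter): a relation whose path is database="analytics",
# schema="staging", identifier="users" renders as
# '"analytics"."staging"."users"', while without_identifier().render()
# yields '"analytics"."staging"'.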
def _render_iterator(
self
) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
for key in ComponentName:
path_part: Optional[str] = None
if self.include_policy.get_part(key):
path_part = self.path.get_part(key)
if path_part is not None and self.quote_policy.get_part(key):
path_part = self.quoted(path_part)
yield key, path_part
def render(self) -> str:
# if there is nothing set, this will return the empty string.
return '.'.join(
part for _, part in self._render_iterator()
if part is not None
)
def quoted(self, identifier):
return '{quote_char}{identifier}{quote_char}'.format(
quote_char=self.quote_character,
identifier=identifier,
)
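# e.g. with the default quote_character, self.quoted('events') returns
# '"events"'; render() then joins the included, quoted parts with '.'.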
@classmethod
def create_from_source(
cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any
) -> Self:
source_quoting = source.quoting.to_dict()
source_quoting.pop('column', None)
quote_policy = deep_merge(
cls.get_default_quote_policy().to_dict(),
source_quoting,
kwargs.get('quote_policy', {}),
)
return cls.create(
database=source.database,
schema=source.schema,
identifier=source.identifier,
quote_policy=quote_policy,
**kwargs
)
@classmethod
def create_from_node(
cls: Type[Self],
config: HasQuoting,
node: Union[ParsedNode, CompiledNode],
quote_policy: Optional[Dict[str, bool]] = None,
**kwargs: Any,
) -> Self:
if quote_policy is None:
quote_policy = {}
quote_policy = dbt.utils.merge(config.quoting, quote_policy)
return cls.create(
database=node.database,
schema=node.schema,
identifier=node.alias,
quote_policy=quote_policy,
**kwargs)
@classmethod
def create_from(
cls: Type[Self],
config: HasQuoting,
node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition],
**kwargs: Any,
) -> Self:
if node.resource_type == NodeType.Source:
if not isinstance(node, ParsedSourceDefinition):
raise InternalException(
'type mismatch, expected ParsedSourceDefinition but got {}'
.format(type(node))
)
return cls.create_from_source(node, **kwargs)
else:
if not isinstance(node, (ParsedNode, CompiledNode)):
raise InternalException(
'type mismatch, expected ParsedNode or CompiledNode but '
'got {}'.format(type(node))
)
return cls.create_from_node(config, node, **kwargs)
@classmethod
def create(
cls: Type[Self],
database: Optional[str] = None,
schema: Optional[str] = None,
identifier: Optional[str] = None,
type: Optional[RelationType] = None,
**kwargs,
) -> Self:
kwargs.update({
'path': {
'database': database,
'schema': schema,
'identifier': identifier,
},
'type': type,
})
return cls.from_dict(kwargs)
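# Construction sketch (adapters normally use their own BaseRelation subclass;
# the names below are illustrative only):
#
#   rel = SomeAdapterRelation.create(
#       database='analytics', schema='staging', identifier='users',
#       type=RelationType.Table)
#   rel.is_table                         # -> True
#   rel.without_identifier().render()    # database and schema only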
def __repr__(self) -> str:
return "<{} {}>".format(self.__class__.__name__, self.render())
def __hash__(self) -> int:
return hash(self.render())
def __str__(self) -> str:
return self.render()
@property
def database(self) -> Optional[str]:
return self.path.database
@property
def schema(self) -> Optional[str]:
return self.path.schema
@property
def identifier(self) -> Optional[str]:
return self.path.identifier
@property
def table(self) -> Optional[str]:
return self.path.identifier
# Here for compatibility with old Relation interface
@property
def name(self) -> Optional[str]:
return self.identifier
@property
def is_table(self) -> bool:
return self.type == RelationType.Table
@property
def is_cte(self) -> bool:
return self.type == RelationType.CTE
@property
def is_view(self) -> bool:
return self.type == RelationType.View
@classproperty
def Table(cls) -> str:
return str(RelationType.Table)
@classproperty
def CTE(cls) -> str:
return str(RelationType.CTE)
@classproperty
def View(cls) -> str:
return str(RelationType.View)
@classproperty
def External(cls) -> str:
return str(RelationType.External)
@classproperty
def get_relation_type(cls) -> Type[RelationType]:
return RelationType
Info = TypeVar('Info', bound='InformationSchema')
@dataclass(frozen=True, eq=False, repr=False)
class InformationSchema(BaseRelation):
information_schema_view: Optional[str] = None
def __post_init__(self):
if not isinstance(self.information_schema_view, (type(None), str)):
raise dbt.exceptions.CompilationException(
'Got an invalid name: {}'.format(self.information_schema_view)
)
@classmethod
def get_path(
cls, relation: BaseRelation, information_schema_view: Optional[str]
) -> Path:
return Path(
database=relation.database,
schema=relation.schema,
identifier='INFORMATION_SCHEMA',
)
@classmethod
def get_include_policy(
cls,
relation,
information_schema_view: Optional[str],
) -> Policy:
return relation.include_policy.replace(
database=relation.database is not None,
schema=False,
identifier=True,
)
@classmethod
def get_quote_policy(
cls,
relation,
information_schema_view: Optional[str],
) -> Policy:
return relation.quote_policy.replace(
identifier=False,
)
@classmethod
def from_relation(
cls: Type[Info],
relation: BaseRelation,
information_schema_view: Optional[str],
) -> Info:
include_policy = cls.get_include_policy(
relation, information_schema_view
)
quote_policy = cls.get_quote_policy(relation, information_schema_view)
path = cls.get_path(relation, information_schema_view)
return cls(
type=RelationType.View,
path=path,
include_policy=include_policy,
quote_policy=quote_policy,
information_schema_view=information_schema_view,
)
def _render_iterator(self):
for k, v in super()._render_iterator():
yield k, v
yield None, self.information_schema_view
class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
"""A utility class to keep track of what information_schema tables to
search for what schemas. The schema values are all lowercased to avoid
duplication.
"""
def add(self, relation: BaseRelation):
key = relation.information_schema_only()
if key not in self:
self[key] = set()
schema: Optional[str] = None
if relation.schema is not None:
schema = relation.schema.lower()
self[key].add(schema)
def search(
self
) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
for information_schema_name, schemas in self.items():
for schema in schemas:
yield information_schema_name, schema
def flatten(self):
new = self.__class__()
# make sure we don't have duplicates
seen = {r.database.lower() for r in self if r.database}
if len(seen) > 1:
dbt.exceptions.raise_compiler_error(str(seen))
for information_schema_name, schema in self.search():
path = {
'database': information_schema_name.database,
'schema': schema
}
new.add(information_schema_name.incorporate(
path=path,
quote_policy={'database': False},
include_policy={'database': False},
))
return new
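# A small usage sketch for SchemaSearchMap (hypothetical relations):
#
#   search_map = SchemaSearchMap()
#   search_map.add(users_relation)    # e.g. analytics.staging.users
#   search_map.add(orders_relation)   # e.g. analytics.marts.orders
#   for information_schema, schema in search_map.search():
#       ...  # one pair per distinct (information_schema, lowercased schema)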
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app as absl_app
from absl import flags
flags.DEFINE_string(name="job-dir", default="/tmp", help="AI Platform Training passes this to the training script.")
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.mnist import dataset
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
LEARNING_RATE = 1e-4
def create_model(data_format):
"""Model to recognize digits in the MNIST dataset.
Network structure is equivalent to:
https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py
and
https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py
But uses the tf.keras API.
Args:
data_format: Either 'channels_first' or 'channels_last'. 'channels_first' is
typically faster on GPUs while 'channels_last' is typically faster on
CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
Returns:
A tf.keras.Model.
"""
if data_format == 'channels_first':
input_shape = [1, 28, 28]
else:
assert data_format == 'channels_last'
input_shape = [28, 28, 1]
l = tf.keras.layers
max_pool = l.MaxPooling2D(
(2, 2), (2, 2), padding='same', data_format=data_format)
# The model consists of a sequential chain of layers, so tf.keras.Sequential
# (a subclass of tf.keras.Model) makes for a compact description.
return tf.keras.Sequential(
[
l.Reshape(
target_shape=input_shape,
input_shape=(28 * 28,)),
l.Conv2D(
32,
5,
padding='same',
data_format=data_format,
activation=tf.nn.relu),
max_pool,
l.Conv2D(
64,
5,
padding='same',
data_format=data_format,
activation=tf.nn.relu),
max_pool,
l.Flatten(),
l.Dense(1024, activation=tf.nn.relu),
l.Dropout(0.4),
l.Dense(10)
])
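# A quick shape check for the model above (a sketch; the shapes follow from
# the layer stack rather than from any external reference):
#
#   model = create_model('channels_last')
#   logits = model(tf.zeros([2, 28 * 28]))   # a batch of 2 flattened images
#   # logits has shape (2, 10): one score per MNIST class.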
def define_mnist_flags():
flags_core.define_base()
flags_core.define_performance(num_parallel_calls=False)
flags_core.define_image()
flags.adopt_module_key_flags(flags_core)
flags_core.set_defaults(data_dir='/tmp/mnist_data',
model_dir='/tmp/mnist_model',
batch_size=100,
train_epochs=40)
def model_fn(features, labels, mode, params):
"""The model_fn argument for creating an Estimator."""
model = create_model(params['data_format'])
image = features
if isinstance(image, dict):
image = features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
logits = model(image, training=False)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits),
}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
logits = model(image, training=True)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
accuracy = tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(logits, axis=1))
# Name tensors to be logged with LoggingTensorHook.
tf.identity(LEARNING_RATE, 'learning_rate')
tf.identity(loss, 'cross_entropy')
tf.identity(accuracy[1], name='train_accuracy')
# Save accuracy scalar to Tensorboard output.
tf.summary.scalar('train_accuracy', accuracy[1])
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=optimizer.minimize(loss, tf.train.get_or_create_global_step()))
if mode == tf.estimator.ModeKeys.EVAL:
logits = model(image, training=False)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={
'accuracy':
tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(logits, axis=1)),
})
def run_mnist(flags_obj):
"""Run MNIST training and eval loop.
Args:
flags_obj: An object containing parsed flag values.
"""
model_helpers.apply_clean(flags_obj)
model_function = model_fn
session_config = tf.ConfigProto(
inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
allow_soft_placement=True)
distribution_strategy = distribution_utils.get_distribution_strategy(
flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)
run_config = tf.estimator.RunConfig(
train_distribute=distribution_strategy, session_config=session_config)
data_format = flags_obj.data_format
if data_format is None:
data_format = ('channels_first'
if tf.test.is_built_with_cuda() else 'channels_last')
mnist_classifier = tf.estimator.Estimator(
model_fn=model_function,
model_dir=flags_obj.model_dir,
config=run_config,
params={
'data_format': data_format,
})
# Set up training and evaluation input functions.
def train_input_fn():
"""Prepare data for training."""
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes use less memory. MNIST is a small
# enough dataset that we can easily shuffle the full epoch.
ds = dataset.train(flags_obj.data_dir)
ds = ds.cache().shuffle(buffer_size=50000).batch(flags_obj.batch_size)
# Iterate through the dataset a set number (`epochs_between_evals`) of times
# during each training session.
ds = ds.repeat(flags_obj.epochs_between_evals)
return ds
def eval_input_fn():
return dataset.test(flags_obj.data_dir).batch(
flags_obj.batch_size).make_one_shot_iterator().get_next()
# Set up hook that outputs training logs every 100 steps.
train_hooks = hooks_helper.get_train_hooks(
flags_obj.hooks, model_dir=flags_obj.model_dir,
batch_size=flags_obj.batch_size)
# Train and evaluate model.
for _ in range(flags_obj.train_epochs // flags_obj.epochs_between_evals):
mnist_classifier.train(input_fn=train_input_fn, hooks=train_hooks)
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print('\nEvaluation results:\n\t%s\n' % eval_results)
if model_helpers.past_stop_threshold(flags_obj.stop_threshold,
eval_results['accuracy']):
break
# Export the model
if flags_obj.export_dir is not None:
image = tf.placeholder(tf.float32, [None, 28, 28])
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'image': image,
})
mnist_classifier.export_savedmodel(flags_obj.export_dir, input_fn,
strip_default_attrs=True)
def main(_):
run_mnist(flags.FLAGS)
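# --- Hedged usage sketch (not part of the original script) ---
# Shows how the SavedModel exported by run_mnist() above could be queried for
# predictions. It assumes a TF 1.x build where tf.contrib.predictor is
# available; the export directory and the random batch are placeholders.
def _predict_from_export_example(export_dir='/tmp/mnist_export/1530000000'):
    import numpy as np
    from tensorflow.contrib import predictor
    # 'classify' is the signature name used in export_outputs above; the input
    # key 'image' matches the raw serving input receiver ([None, 28, 28]).
    predict_fn = predictor.from_saved_model(export_dir, signature_def_key='classify')
    batch = np.random.rand(2, 28, 28).astype(np.float32)
    outputs = predict_fn({'image': batch})
    return outputs['classes'], outputs['probabilities']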
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
define_mnist_flags()
absl_app.run(main)
|
|
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from cloudify import exceptions as cfy_exc
from tests.unittests import test_mock_base
from network_plugin import public_nat
from network_plugin import utils
import network_plugin
import vcloud_plugin_common
from IPy import IP
class NetworkPluginPublicNatMockTestCase(test_mock_base.TestBase):
def test_is_rule_exists(self):
rule_inlist = self.generate_nat_rule(
'SNAT', 'external', '22', 'internal', '11', 'TCP'
)
# rule exists
self.assertTrue(
public_nat._is_rule_exists(
[rule_inlist], 'SNAT', 'external', '22', 'internal',
'11', 'TCP')
)
# rule does not exist
self.assertFalse(
public_nat._is_rule_exists(
[rule_inlist], 'SNAT', 'external', '22', 'internal',
'11', 'UDP')
)
def test_get_original_port_for_delete(self):
# no replacement
fake_ctx = self.generate_relation_context()
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
self.assertEqual(
public_nat._get_original_port_for_delete("10.1.1.1", "11"),
"11"
)
# replacement exists, but only for a different ip
fake_ctx = self.generate_relation_context()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {
("10.1.1.2", "11"): '12'
}
}
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
self.assertEqual(
public_nat._get_original_port_for_delete("10.1.1.1", "11"),
"11"
)
# replacement exists for this ip/port
fake_ctx = self.generate_relation_context()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {
("10.1.1.2", "11"): '12'
}
}
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
self.assertEqual(
public_nat._get_original_port_for_delete("10.1.1.2", "11"),
"12"
)
def test_get_original_port_for_create(self):
gateway = mock.Mock()
rule_inlist = self.generate_nat_rule(
'SNAT', 'external', 'any', 'internal', '11', 'TCP'
)
gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist])
# exception raised for an already used port
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._get_original_port_for_create(
gateway, 'SNAT', 'external', 'any', 'internal', '11', 'TCP'
)
# everything is fine with a different port
self.assertEqual(
public_nat._get_original_port_for_create(
gateway, 'SNAT', 'external', 'any', 'internal', '12', 'TCP'
),
'any'
)
# relink some port to another
# the port has not been used yet
self.assertEqual(
public_nat._get_original_port_for_create(
gateway, 'SNAT', 'external', 10, 'internal', '12', 'TCP'
),
10
)
def test_get_original_port_for_create_with_ctx(self):
# requested port is taken and the replacement table is empty - port bumped by 1
fake_ctx = self.generate_relation_context()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {}
}
gateway = mock.Mock()
rule_inlist = self.generate_nat_rule(
'SNAT', 'external', 10, 'internal', 11, 'TCP'
)
gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist])
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
self.assertEqual(
public_nat._get_original_port_for_create(
gateway, 'SNAT', 'external', '10', 'internal', '11', 'TCP'
),
11
)
self.assertEqual(
fake_ctx._target.instance.runtime_properties,
{
public_nat.PORT_REPLACEMENT: {
('external', '10'): 11
}
}
)
# same, but without a replacement table at all
fake_ctx._target.instance.runtime_properties = {}
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
self.assertEqual(
public_nat._get_original_port_for_create(
gateway, 'SNAT', 'external', '10', 'internal', '11', 'TCP'
),
11
)
self.assertEqual(
fake_ctx._target.instance.runtime_properties,
{
public_nat.PORT_REPLACEMENT: {
('external', '10'): 11
}
}
)
# we don't have enough ports
rule_inlist = self.generate_nat_rule(
'SNAT', 'external', utils.MAX_PORT_NUMBER,
'internal', 11, 'TCP'
)
gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist])
fake_ctx._target.instance.runtime_properties = {}
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._get_original_port_for_create(
gateway, 'SNAT', 'external',
utils.MAX_PORT_NUMBER, 'internal', '11', 'TCP'
)
def test_get_gateway_ip_range(self):
gate = mock.Mock()
# empty list of networks
gate.get_dhcp_pools = mock.MagicMock(return_value=[])
self.assertEqual(
public_nat._get_gateway_ip_range(gate, 'something'),
None
)
# a different network exists
gate.get_dhcp_pools = mock.MagicMock(return_value=[
self.genarate_pool(
'test_network', '127.0.0.1', '127.0.0.255'
)
])
self.assertEqual(
public_nat._get_gateway_ip_range(gate, 'something'),
None
)
# the correct network exists
self.assertEqual(
public_nat._get_gateway_ip_range(gate, 'test_network'),
(IP('127.0.0.1'), IP('127.0.0.255'))
)
def test_obtain_public_ip(self):
fake_ctx = self.generate_relation_context()
fake_ctx._target.instance.runtime_properties = {
network_plugin.PUBLIC_IP: '192.168.1.1'
}
gateway = mock.Mock()
fake_client = mock.Mock()
# an ip exists for delete
self.assertEqual(
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, network_plugin.DELETE
),
'192.168.1.1'
)
# no ip for delete
fake_ctx._target.instance.runtime_properties = {}
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, network_plugin.DELETE
)
# unknown operation
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, 'unknow operation'
)
# a public ip already exists
fake_ctx._target.node.properties = {
'nat': {
network_plugin.PUBLIC_IP: '192.168.1.1'
}
}
self.assertEqual(
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, network_plugin.CREATE
),
'192.168.1.1'
)
# no public ip yet
fake_ctx._target.node.properties = {
'nat': {}
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
}
}
gateway.get_public_ips = mock.MagicMock(return_value=[
'10.18.1.1', '10.18.1.2'
])
rule_inlist = self.generate_nat_rule(
'DNAT', '10.18.1.1', 'any', 'internal', '11', 'TCP'
)
gateway.get_nat_rules = mock.MagicMock(
return_value=[rule_inlist]
)
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
self.assertEqual(
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway,
network_plugin.CREATE
),
'10.18.1.2'
)
def test_get_network_ip_range(self):
# no ip range for this network
fake_client = self.generate_client()
self.assertEqual(
public_nat._get_network_ip_range(
fake_client, "some_org", "some_network"
),
None
)
fake_client.get_networks.assert_called_with("some_org")
# different network
network = self.generate_fake_client_network(
name="some", start_ip="127.1.1.1", end_ip="127.1.1.255"
)
fake_client.get_networks = mock.MagicMock(return_value=[network])
self.assertEqual(
public_nat._get_network_ip_range(
fake_client, "some_org", "some_network"
),
None
)
# correct network name
fake_client.get_networks = mock.MagicMock(return_value=[network])
self.assertEqual(
public_nat._get_network_ip_range(
fake_client, "some_org", "some"
),
(IP('127.1.1.1'), IP('127.1.1.255'))
)
def test_create_ip_range(self):
# context
fake_ctx = self.generate_relation_context()
fake_ctx._source.instance.runtime_properties = {
network_plugin.network.VCLOUD_NETWORK_NAME: "some"
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'org': 'some_org'
}
}
fake_ctx._target.instance.runtime_properties = {}
# vca client
fake_client = self.generate_client()
# gateway
gate = fake_client._vdc_gateway
gate.get_dhcp_pools = mock.MagicMock(return_value=[])
network = self.generate_fake_client_network(
name="some", start_ip="127.1.1.100", end_ip="127.1.1.200"
)
fake_client.get_networks = mock.MagicMock(return_value=[network])
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
# empty gateway dhcp pool
# vca pool: 127.1.1.100..127.1.1.200
self.assertEqual(
public_nat._create_ip_range(fake_client, gate),
'127.1.1.100 - 127.1.1.200'
)
fake_client.get_networks.assert_called_with("some_org")
# network from gate
gate.get_dhcp_pools = mock.MagicMock(return_value=[
self.genarate_pool(
"some", '127.1.1.1', '127.1.1.255'
)
])
self.assertEqual(
public_nat._create_ip_range(fake_client, gate),
'127.1.1.1 - 127.1.1.255'
)
# network does not exist
network = self.generate_fake_client_network(
name="other", start_ip="127.1.1.100",
end_ip="127.1.1.200"
)
fake_client.get_networks = mock.MagicMock(
return_value=[network]
)
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._create_ip_range(fake_client, gate)
def test_save_configuration(self):
def _context_for_delete(service_type):
"""
Create a correct relation context for the delete operation.
"""
fake_ctx = self.generate_relation_context()
self.set_services_conf_result(
gateway, vcloud_plugin_common.TASK_STATUS_SUCCESS
)
fake_ctx._target.instance.runtime_properties = {
network_plugin.PUBLIC_IP: "1.2.3.4"
}
properties = {
'vcloud_config': {
'org': 'some_org',
}
}
if service_type:
properties['vcloud_config']['service_type'] = service_type
fake_ctx._source.node.properties = properties
return fake_ctx
def _ip_exist_in_runtime(fake_ctx):
"""
check whether the ip still exists in ctx
"""
runtime_properties = fake_ctx._target.instance.runtime_properties
return network_plugin.PUBLIC_IP in runtime_properties
fake_client = self.generate_client()
gateway = fake_client._vdc_gateway
# can't save configuration: server busy
self.set_services_conf_result(
gateway, None
)
self.set_gateway_busy(gateway)
fake_ctx = self.generate_relation_context()
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
self.prepare_retry(fake_ctx)
public_nat._save_configuration(
gateway, fake_client, "any", "any"
)
self.check_retry_realy_called(fake_ctx)
# operation create
fake_ctx = self.generate_relation_context()
self.set_services_conf_result(
gateway, vcloud_plugin_common.TASK_STATUS_SUCCESS
)
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
# success save configuration
public_nat._save_configuration(
gateway, fake_client, network_plugin.CREATE, "1.2.3.4"
)
self.assertEqual(
fake_ctx._target.instance.runtime_properties,
{
network_plugin.PUBLIC_IP: "1.2.3.4"
}
)
# delete - subscription service
fake_ctx = _context_for_delete(
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
)
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat._save_configuration(
gateway, fake_client, network_plugin.DELETE, "1.2.3.4"
)
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
# delete - without service
fake_ctx = _context_for_delete(None)
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat._save_configuration(
gateway, fake_client, network_plugin.DELETE, "1.2.3.4"
)
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
# delete - ondemand service - nat
fake_ctx = _context_for_delete(
vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
)
fake_ctx._target.node.properties = {
'nat': {
network_plugin.PUBLIC_IP: "1.2.3.4"
}
}
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat._save_configuration(
gateway, fake_client, network_plugin.DELETE, "1.2.3.4"
)
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
# delete - ondemand - not nat
gateway.deallocate_public_ip = mock.MagicMock(
return_value=self.generate_task(
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
)
fake_ctx = _context_for_delete(
vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
)
fake_ctx._target.node.properties = {
'nat': {}
}
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat._save_configuration(
gateway, fake_client, network_plugin.DELETE, "1.2.3.4"
)
gateway.deallocate_public_ip.assert_called_with("1.2.3.4")
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
def test_nat_network_operation(self):
fake_client = self.generate_client()
gateway = fake_client._vdc_gateway
# wrong operation used
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.nat_network_operation(
fake_client, gateway, "unknow", "DNAT", "1.2.3.4",
"2.3.4.5", "11", "11", "TCP"
)
# run correct operation/rule
fake_ctx = self.generate_relation_context()
for operation in [network_plugin.DELETE, network_plugin.CREATE]:
for rule_type in ["SNAT", "DNAT"]:
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.nat_network_operation(
fake_client, gateway, operation, rule_type,
"1.2.3.4", "2.3.4.5", "11", "11", "TCP"
)
if rule_type == "DNAT":
if operation == network_plugin.DELETE:
gateway.del_nat_rule.assert_called_with(
'DNAT', '1.2.3.4', '11', '2.3.4.5', '11',
'TCP'
)
else:
gateway.add_nat_rule.assert_called_with(
'DNAT', '1.2.3.4', '11', '2.3.4.5', '11',
'TCP'
)
else:
if operation == network_plugin.DELETE:
gateway.del_nat_rule.assert_called_with(
'SNAT', '2.3.4.5', 'any', '1.2.3.4', 'any',
'any'
)
else:
gateway.add_nat_rule.assert_called_with(
'SNAT', '2.3.4.5', 'any', '1.2.3.4', 'any',
'any'
)
def generate_client_and_context_server(self):
"""
Create a client and context for prepare_server_operation based tests.
"""
fake_client = self.generate_client(vms_networks=[{
'is_connected': True,
'network_name': 'network_name',
'is_primary': True,
'ip': '1.1.1.1'
}])
self.set_network_routed_in_client(fake_client)
fake_ctx = self.generate_relation_context()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
}
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
}
}
fake_ctx._target.instance.runtime_properties = {
network_plugin.PUBLIC_IP: '192.168.1.1'
}
self.set_services_conf_result(
fake_client._vdc_gateway,
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
return fake_client, fake_ctx
def test_prepare_server_operation(self):
fake_client, fake_ctx = self.generate_client_and_context_server()
# no rules for update
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.prepare_server_operation(
fake_client, network_plugin.DELETE
)
# with some rules
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT',
'protocol': 'TCP',
'original_port': "11",
'translated_port': "11"
}]
}
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.prepare_server_operation(
fake_client, network_plugin.DELETE
)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', '11', '1.1.1.1', '11', 'TCP'
)
# with default value
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.instance.runtime_properties = {
network_plugin.PUBLIC_IP: '192.168.1.1'
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.prepare_server_operation(
fake_client, network_plugin.DELETE
)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '1.1.1.1', 'any', 'any'
)
def generate_client_and_context_network(self):
"""
Create a client and context for prepare_network_operation based tests.
"""
fake_client = self.generate_client(vms_networks=[{
'is_connected': True,
'network_name': 'network_name',
'is_primary': True,
'ip': '1.1.1.1'
}])
self.set_network_routed_in_client(fake_client)
gate = fake_client._vdc_gateway
gate.get_dhcp_pools = mock.MagicMock(return_value=[])
network = self.generate_fake_client_network(
name="some", start_ip="127.1.1.100", end_ip="127.1.1.200"
)
fake_client.get_networks = mock.MagicMock(return_value=[network])
self.set_services_conf_result(
fake_client._vdc_gateway,
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
# ctx
fake_ctx = self.generate_relation_context()
fake_ctx._source.instance.runtime_properties = {
network_plugin.network.VCLOUD_NETWORK_NAME: "some"
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'org': 'some_org',
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
}
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
}
}
fake_ctx._target.instance.runtime_properties = {
network_plugin.PUBLIC_IP: '192.168.1.1'
}
return fake_client, fake_ctx
def test_prepare_network_operation(self):
# no rules
fake_client, fake_ctx = self.generate_client_and_context_network()
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.prepare_network_operation(
fake_client, network_plugin.DELETE
)
# rules with default values
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
with mock.patch(
'network_plugin.public_nat.ctx', fake_ctx
):
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.prepare_network_operation(
fake_client, network_plugin.DELETE
)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '127.1.1.100 - 127.1.1.200',
'any', 'any'
)
def test_creation_validation(self):
fake_client = self.generate_client()
# no nat
fake_ctx = self.generate_node_context(
properties={
'vcloud_config': {
'vdc': 'vdc_name'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx)
# no gateway
fake_ctx = self.generate_node_context(
properties={
'vcloud_config': {
'vdc': 'vdc_name'
},
'nat': {
'some_field': 'something'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx)
# wrong ip
fake_ctx = self.generate_node_context(properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: 'any'
}
})
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx)
# no free ip
fake_ctx = self.generate_node_context(properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway'
}
})
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx)
# no rules
fake_ctx = self.generate_node_context(properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: '10.12.2.1'
}
})
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx)
# wrong protocol
fake_ctx = self.generate_node_context(properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "some"
}]
})
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx)
# wrong original_port
fake_ctx = self.generate_node_context(properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "TCP",
'original_port': 'some'
}]
})
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx)
# wrong translated_port
fake_ctx = self.generate_node_context(properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "TCP",
'original_port': 11,
'translated_port': 'some'
}]
})
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx)
# fine
fake_ctx = self.generate_node_context(properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "TCP",
'original_port': 11,
'translated_port': 12
}]
})
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.creation_validation(ctx=fake_ctx)
def test_server_disconnect_from_nat(self):
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.instance.runtime_properties = {
network_plugin.PUBLIC_IP: '192.168.1.1'
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.server_disconnect_from_nat(ctx=fake_ctx)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '1.1.1.1', 'any', 'any'
)
def test_server_connect_to_nat(self):
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.instance.runtime_properties = {
network_plugin.PUBLIC_IP: '192.168.1.1'
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
fake_client._vdc_gateway.get_public_ips = mock.MagicMock(
return_value=['10.18.1.1']
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.server_connect_to_nat(ctx=fake_ctx)
fake_client._vdc_gateway.add_nat_rule.assert_called_with(
'DNAT', '10.18.1.1', 'any', '1.1.1.1', 'any', 'any'
)
def test_net_disconnect_from_nat(self):
# use external
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'use_external_resource': True
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_disconnect_from_nat(ctx=fake_ctx)
# no external
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_disconnect_from_nat(ctx=fake_ctx)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '127.1.1.100 - 127.1.1.200',
'any', 'any'
)
def test_net_connect_to_nat(self):
# use external
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'use_external_resource': True
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_connect_to_nat(ctx=fake_ctx)
# no external
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
fake_client._vdc_gateway.get_public_ips = mock.MagicMock(return_value=[
'10.18.1.1'
])
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_connect_to_nat(ctx=fake_ctx)
fake_client._vdc_gateway.add_nat_rule.assert_called_with(
'DNAT', '10.18.1.1', 'any', '127.1.1.100 - 127.1.1.200',
'any', 'any'
)
if __name__ == '__main__':
unittest.main()
|
|
import sys
import warnings
from math import log
from numbers import Number
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import clone
from sklearn.base import is_regressor
from joblib import Parallel, delayed
from sklearn.multioutput import MultiOutputRegressor
from sklearn.utils import check_random_state
from ..acquisition import _gaussian_acquisition
from ..acquisition import gaussian_acquisition_1D
from ..learning import GaussianProcessRegressor
from ..space import Categorical
from ..space import Space
from ..utils import check_x_in_space
from ..utils import cook_estimator
from ..utils import create_result
from ..utils import has_gradients
from ..utils import is_listlike
from ..utils import is_2Dlistlike
from ..utils import normalize_dimensions
from ..utils import cook_initial_point_generator
class Optimizer(object):
"""Run bayesian optimisation loop.
An `Optimizer` represents the steps of a bayesian optimisation loop. To
use it you need to provide your own loop mechanism. The various
optimisers provided by `skopt` use this class under the hood.
Use this class directly if you want to control the iterations of your
bayesian optimisation loop.
Parameters
----------
dimensions : list, shape (n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
base_estimator : `"GP"`, `"RF"`, `"ET"`, `"GBRT"` or sklearn regressor, \
default: `"GP"`
Should inherit from :obj:`sklearn.base.RegressorMixin`.
In addition, the `predict` method should have an optional `return_std`
argument, which returns `std(Y | x)` along with `E[Y | x]`.
If base_estimator is one of ["GP", "RF", "ET", "GBRT"], a default
surrogate model of the corresponding type is used, matching what
the minimize functions use.
n_random_starts : int, default: 10
.. deprecated:: 0.6
use `n_initial_points` instead.
n_initial_points : int, default: 10
Number of evaluations of `func` with initialization points
before approximating it with `base_estimator`. Initial point
generator can be changed by setting `initial_point_generator`.
initial_point_generator : str, InitialPointGenerator instance, \
default: `"random"`
Sets an initial point generator. Can be either
- `"random"` for uniform random numbers,
- `"sobol"` for a Sobol' sequence,
- `"halton"` for a Halton sequence,
- `"hammersly"` for a Hammersly sequence,
- `"lhs"` for a latin hypercube sequence,
- `"grid"` for a uniform grid sequence
acq_func : string, default: `"gp_hedge"`
Function to minimize over the posterior distribution. Can be either
- `"LCB"` for lower confidence bound.
- `"EI"` for negative expected improvement.
- `"PI"` for negative probability of improvement.
- `"gp_hedge"` Probabilistically choose one of the above three
acquisition functions at every iteration.
- The gains `g_i` are initialized to zero.
- At every iteration,
- Each acquisition function is optimised independently to
propose a candidate point `X_i`.
- Out of all these candidate points, the next point `X_best` is
chosen by :math:`softmax(\\eta g_i)`
- After fitting the surrogate model with `(X_best, y_best)`,
the gains are updated such that :math:`g_i -= \\mu(X_i)`
- `"EIps"` for negated expected improvement per second to take into
account the function compute time. Then, the objective function is
assumed to return two values, the first being the objective value and
the second being the time taken in seconds.
- `"PIps"` for negated probability of improvement per second. The
return type of the objective function is assumed to be similar to
that of `"EIps"`
acq_optimizer : string, `"sampling"` or `"lbfgs"`, default: `"auto"`
Method to minimize the acquisition function. The fit model
is updated with the optimal value obtained by optimizing `acq_func`
with `acq_optimizer`.
- If set to `"auto"`, then `acq_optimizer` is configured on the
basis of the base_estimator and the space searched over.
If the space is Categorical or if the estimator provided is based on
tree models, then this is set to `"sampling"`.
- If set to `"sampling"`, then `acq_func` is optimized by computing
`acq_func` at `n_points` randomly sampled points.
- If set to `"lbfgs"`, then `acq_func` is optimized by
- Sampling `n_restarts_optimizer` points randomly.
- `"lbfgs"` is run for 20 iterations with these points as initial
points to find local minima.
- The optimal of these local minima is used to update the prior.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
n_jobs : int, default: 1
The number of jobs to run in parallel in the base_estimator,
if the base_estimator supports n_jobs as parameter and
base_estimator was given as string.
If -1, then the number of jobs is set to the number of cores.
acq_func_kwargs : dict
Additional arguments to be passed to the acquisition function.
acq_optimizer_kwargs : dict
Additional arguments to be passed to the acquisition optimizer.
model_queue_size : int or None, default: None
Keeps the list of models at most as long as the argument given. In the
case of None, the list has no capped length.
Attributes
----------
Xi : list
Points at which objective has been evaluated.
yi : list
Values of objective at corresponding points in `Xi`.
models : list
Regression models used to fit observations and compute acquisition
function.
space : Space
An instance of :class:`skopt.space.Space`. Stores parameter search
space used to sample points, bounds, and type of parameters.
"""
def __init__(self, dimensions, base_estimator="gp",
n_random_starts=None, n_initial_points=10,
initial_point_generator="random",
n_jobs=1, acq_func="gp_hedge",
acq_optimizer="auto",
random_state=None,
model_queue_size=None,
acq_func_kwargs=None,
acq_optimizer_kwargs=None):
args = locals().copy()
del args['self']
self.specs = {"args": args,
"function": "Optimizer"}
self.rng = check_random_state(random_state)
# Configure acquisition function
# Store and create acquisition function set
self.acq_func = acq_func
self.acq_func_kwargs = acq_func_kwargs
allowed_acq_funcs = ["gp_hedge", "EI", "LCB", "PI", "EIps", "PIps"]
if self.acq_func not in allowed_acq_funcs:
raise ValueError("expected acq_func to be in %s, got %s" %
(",".join(allowed_acq_funcs), self.acq_func))
# treat hedging method separately
if self.acq_func == "gp_hedge":
self.cand_acq_funcs_ = ["EI", "LCB", "PI"]
self.gains_ = np.zeros(3)
else:
self.cand_acq_funcs_ = [self.acq_func]
if acq_func_kwargs is None:
acq_func_kwargs = dict()
self.eta = acq_func_kwargs.get("eta", 1.0)
# Configure counters of points
# Check `n_random_starts` deprecation first
if n_random_starts is not None:
warnings.warn(("n_random_starts will be removed in favour of "
"n_initial_points."),
DeprecationWarning)
n_initial_points = n_random_starts
if n_initial_points < 0:
raise ValueError(
"Expected `n_initial_points` >= 0, got %d" % n_initial_points)
self._n_initial_points = n_initial_points
self.n_initial_points_ = n_initial_points
# Configure estimator
# build base_estimator if it was given as a string
if isinstance(base_estimator, str):
base_estimator = cook_estimator(
base_estimator, space=dimensions,
random_state=self.rng.randint(0, np.iinfo(np.int32).max),
n_jobs=n_jobs)
# check if regressor
if not is_regressor(base_estimator) and base_estimator is not None:
raise ValueError(
"%s has to be a regressor." % base_estimator)
# treat per-second acquisition functions specially
is_multi_regressor = isinstance(base_estimator, MultiOutputRegressor)
if "ps" in self.acq_func and not is_multi_regressor:
self.base_estimator_ = MultiOutputRegressor(base_estimator)
else:
self.base_estimator_ = base_estimator
# Configure optimizer
# decide optimizer based on gradient information
if acq_optimizer == "auto":
if has_gradients(self.base_estimator_):
acq_optimizer = "lbfgs"
else:
acq_optimizer = "sampling"
if acq_optimizer not in ["lbfgs", "sampling"]:
raise ValueError("Expected acq_optimizer to be 'lbfgs' or "
"'sampling', got {0}".format(acq_optimizer))
if (not has_gradients(self.base_estimator_) and
acq_optimizer != "sampling"):
raise ValueError("The regressor {0} should run with "
"acq_optimizer"
"='sampling'.".format(type(base_estimator)))
self.acq_optimizer = acq_optimizer
# record other arguments
if acq_optimizer_kwargs is None:
acq_optimizer_kwargs = dict()
self.n_points = acq_optimizer_kwargs.get("n_points", 10000)
self.n_restarts_optimizer = acq_optimizer_kwargs.get(
"n_restarts_optimizer", 5)
self.n_jobs = acq_optimizer_kwargs.get("n_jobs", 1)
self.acq_optimizer_kwargs = acq_optimizer_kwargs
# Configure search space
# normalize space if GP regressor
if isinstance(self.base_estimator_, GaussianProcessRegressor):
dimensions = normalize_dimensions(dimensions)
self.space = Space(dimensions)
self._initial_samples = None
self._initial_point_generator = cook_initial_point_generator(
initial_point_generator)
if self._initial_point_generator is not None:
transformer = self.space.get_transformer()
self._initial_samples = self._initial_point_generator.generate(
self.space.dimensions, n_initial_points,
random_state=self.rng.randint(0, np.iinfo(np.int32).max))
self.space.set_transformer(transformer)
# record categorical and non-categorical indices
self._cat_inds = []
self._non_cat_inds = []
for ind, dim in enumerate(self.space.dimensions):
if isinstance(dim, Categorical):
self._cat_inds.append(ind)
else:
self._non_cat_inds.append(ind)
# Initialize storage for optimization
if not isinstance(model_queue_size, (int, type(None))):
raise TypeError("model_queue_size should be an int or None, "
"got {}".format(type(model_queue_size)))
self.max_model_queue_size = model_queue_size
self.models = []
self.Xi = []
self.yi = []
# Initialize cache for `ask` method responses
# This ensures that multiple calls to `ask` with n_points set
# return same sets of points. Reset to {} at every call to `tell`.
self.cache_ = {}
def copy(self, random_state=None):
"""Create a shallow copy of an instance of the optimizer.
Parameters
----------
random_state : int, RandomState instance, or None (default)
Set the random state of the copy.
"""
optimizer = Optimizer(
dimensions=self.space.dimensions,
base_estimator=self.base_estimator_,
n_initial_points=self.n_initial_points_,
initial_point_generator=self._initial_point_generator,
acq_func=self.acq_func,
acq_optimizer=self.acq_optimizer,
acq_func_kwargs=self.acq_func_kwargs,
acq_optimizer_kwargs=self.acq_optimizer_kwargs,
random_state=random_state
)
optimizer._initial_samples = self._initial_samples
if hasattr(self, "gains_"):
optimizer.gains_ = np.copy(self.gains_)
if self.Xi:
optimizer._tell(self.Xi, self.yi)
return optimizer
def ask(self, n_points=None, strategy="cl_min"):
"""Query point or multiple points at which objective should be evaluated.
n_points : int or None, default: None
Number of points returned by the ask method.
If the value is None, a single point to evaluate is returned.
Otherwise a list of points to evaluate is returned of size
n_points. This is useful if you can evaluate your objective in
parallel, and thus obtain more objective function evaluations per
unit of time.
strategy : string, default: "cl_min"
Method to use to sample multiple points (see also `n_points`
description). This parameter is ignored if n_points = None.
Supported options are `"cl_min"`, `"cl_mean"` or `"cl_max"`.
- If set to `"cl_min"`, then constant liar strategy is used
with lie objective value being minimum of observed objective
values. `"cl_mean"` and `"cl_max"` means mean and max of values
respectively. For details on this strategy see:
https://hal.archives-ouvertes.fr/hal-00732512/document
With this strategy a copy of the optimizer is created; the copy is
asked for a point, that point is told to the copy with a fake
objective value (the "lie"), the next point is asked from the copy,
and so on. The type of lie defines the different
flavours of `cl_x` strategies.
"""
if n_points is None:
return self._ask()
supported_strategies = ["cl_min", "cl_mean", "cl_max"]
if not (isinstance(n_points, int) and n_points > 0):
raise ValueError(
"n_points should be int > 0, got " + str(n_points)
)
if strategy not in supported_strategies:
raise ValueError(
"Expected parallel_strategy to be one of " +
str(supported_strategies) + ", " + "got %s" % strategy
)
# Cache the result when n_points is not None. If new parameters
# are provided to ask, the cache_ is not used.
if (n_points, strategy) in self.cache_:
return self.cache_[(n_points, strategy)]
# A copy of the optimizer is made in order to manage the
# deletion of points with "lie" objective (the copy of the
# optimizer is simply discarded)
opt = self.copy(random_state=self.rng.randint(0,
np.iinfo(np.int32).max))
X = []
for i in range(n_points):
x = opt.ask()
X.append(x)
ti_available = "ps" in self.acq_func and len(opt.yi) > 0
ti = [t for (_, t) in opt.yi] if ti_available else None
if strategy == "cl_min":
y_lie = np.min(opt.yi) if opt.yi else 0.0 # CL-min lie
t_lie = np.min(ti) if ti is not None else log(sys.float_info.max)
elif strategy == "cl_mean":
y_lie = np.mean(opt.yi) if opt.yi else 0.0 # CL-mean lie
t_lie = np.mean(ti) if ti is not None else log(sys.float_info.max)
else:
y_lie = np.max(opt.yi) if opt.yi else 0.0 # CL-max lie
t_lie = np.max(ti) if ti is not None else log(sys.float_info.max)
# Lie to the optimizer.
if "ps" in self.acq_func:
# Use `_tell()` instead of `tell()` to prevent repeated
# log transformations of the computation times.
opt._tell(x, (y_lie, t_lie))
else:
opt._tell(x, y_lie)
self.cache_ = {(n_points, strategy): X} # cache_ the result
return X
def _ask(self):
"""Suggest next point at which to evaluate the objective.
Return a random point until at least `n_initial_points`
observations have been `tell`ed; after that `base_estimator` is used
to determine the next point.
"""
if self._n_initial_points > 0 or self.base_estimator_ is None:
# this will not make a copy of `self.rng` and hence keep advancing
# our random state.
if self._initial_samples is None:
return self.space.rvs(random_state=self.rng)[0]
else:
# The samples are evaluated starting from initial_samples[0]
return self._initial_samples[
len(self._initial_samples) - self._n_initial_points]
else:
if not self.models:
raise RuntimeError("Random evaluations exhausted and no "
"model has been fit.")
next_x = self._next_x
min_delta_x = min([self.space.distance(next_x, xi)
for xi in self.Xi])
if abs(min_delta_x) <= 1e-8:
warnings.warn("The objective has been evaluated "
"at this point before.")
# return point computed from last call to tell()
return next_x
def tell(self, x, y, fit=True):
"""Record an observation (or several) of the objective function.
Provide values of the objective function at points suggested by
`ask()` or other points. By default a new model will be fit to all
observations. The new model is used to suggest the next point at
which to evaluate the objective. This point can be retrieved by calling
`ask()`.
To add observations without fitting a new model set `fit` to False.
To add multiple observations in a batch pass a list-of-lists for `x`
and a list of scalars for `y`.
Parameters
----------
x : list or list-of-lists
Point at which objective was evaluated.
y : scalar or list
Value of objective at `x`.
fit : bool, default: True
Fit a model to observed evaluations of the objective. A model will
only be fitted after `n_initial_points` points have been told to
the optimizer irrespective of the value of `fit`.
"""
check_x_in_space(x, self.space)
self._check_y_is_valid(x, y)
# take the logarithm of the computation times
if "ps" in self.acq_func:
if is_2Dlistlike(x):
y = [[val, log(t)] for (val, t) in y]
elif is_listlike(x):
y = list(y)
y[1] = log(y[1])
return self._tell(x, y, fit=fit)
def _tell(self, x, y, fit=True):
"""Perform the actual work of incorporating one or more new points.
See `tell()` for the full description.
This method exists to give access to the internals of adding points
by side stepping all input validation and transformation."""
if "ps" in self.acq_func:
if is_2Dlistlike(x):
self.Xi.extend(x)
self.yi.extend(y)
self._n_initial_points -= len(y)
elif is_listlike(x):
self.Xi.append(x)
self.yi.append(y)
self._n_initial_points -= 1
# if y isn't a scalar it means we have been handed a batch of points
elif is_listlike(y) and is_2Dlistlike(x):
self.Xi.extend(x)
self.yi.extend(y)
self._n_initial_points -= len(y)
elif is_listlike(x):
self.Xi.append(x)
self.yi.append(y)
self._n_initial_points -= 1
else:
raise ValueError("Type of arguments `x` (%s) and `y` (%s) "
"not compatible." % (type(x), type(y)))
# optimizer learned something new - discard cache
self.cache_ = {}
# after being "told" n_initial_points we switch from sampling
# random points to using a surrogate model
if (fit and self._n_initial_points <= 0 and
self.base_estimator_ is not None):
transformed_bounds = np.array(self.space.transformed_bounds)
est = clone(self.base_estimator_)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(self.space.transform(self.Xi), self.yi)
if hasattr(self, "next_xs_") and self.acq_func == "gp_hedge":
self.gains_ -= est.predict(np.vstack(self.next_xs_))
if self.max_model_queue_size is None:
self.models.append(est)
elif len(self.models) < self.max_model_queue_size:
self.models.append(est)
else:
# Maximum list size obtained, remove oldest model.
self.models.pop(0)
self.models.append(est)
# even with BFGS as optimizer we want to sample a large number
# of points and then pick the best ones as starting points
X = self.space.transform(self.space.rvs(
n_samples=self.n_points, random_state=self.rng))
self.next_xs_ = []
for cand_acq_func in self.cand_acq_funcs_:
values = _gaussian_acquisition(
X=X, model=est, y_opt=np.min(self.yi),
acq_func=cand_acq_func,
acq_func_kwargs=self.acq_func_kwargs)
# Find the minimum of the acquisition function by randomly
# sampling points from the space
if self.acq_optimizer == "sampling":
next_x = X[np.argmin(values)]
# Use BFGS to find the minimum of the acquisition function, the
# minimization starts from `n_restarts_optimizer` different
# points and the best minimum is used
elif self.acq_optimizer == "lbfgs":
x0 = X[np.argsort(values)[:self.n_restarts_optimizer]]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
results = Parallel(n_jobs=self.n_jobs)(
delayed(fmin_l_bfgs_b)(
gaussian_acquisition_1D, x,
args=(est, np.min(self.yi), cand_acq_func,
self.acq_func_kwargs),
bounds=self.space.transformed_bounds,
approx_grad=False,
maxiter=20)
for x in x0)
cand_xs = np.array([r[0] for r in results])
cand_acqs = np.array([r[1] for r in results])
next_x = cand_xs[np.argmin(cand_acqs)]
# lbfgs should handle this but just in case there are
# precision errors.
if not self.space.is_categorical:
next_x = np.clip(
next_x, transformed_bounds[:, 0],
transformed_bounds[:, 1])
self.next_xs_.append(next_x)
if self.acq_func == "gp_hedge":
logits = np.array(self.gains_)
logits -= np.max(logits)
exp_logits = np.exp(self.eta * logits)
probs = exp_logits / np.sum(exp_logits)
next_x = self.next_xs_[np.argmax(self.rng.multinomial(1,
probs))]
else:
next_x = self.next_xs_[0]
# note the need for [0] at the end
self._next_x = self.space.inverse_transform(
next_x.reshape((1, -1)))[0]
# Pack results
result = create_result(self.Xi, self.yi, self.space, self.rng,
models=self.models)
result.specs = self.specs
return result
def _check_y_is_valid(self, x, y):
"""Check if the shape and types of x and y are consistent."""
if "ps" in self.acq_func:
if is_2Dlistlike(x):
if not (np.ndim(y) == 2 and np.shape(y)[1] == 2):
raise TypeError("expected y to be a list of (func_val, t)")
elif is_listlike(x):
if not (np.ndim(y) == 1 and len(y) == 2):
raise TypeError("expected y to be (func_val, t)")
# if y isn't a scalar it means we have been handed a batch of points
elif is_listlike(y) and is_2Dlistlike(x):
for y_value in y:
if not isinstance(y_value, Number):
raise ValueError("expected y to be a list of scalars")
elif is_listlike(x):
if not isinstance(y, Number):
raise ValueError("`func` should return a scalar")
else:
raise ValueError("Type of arguments `x` (%s) and `y` (%s) "
"not compatible." % (type(x), type(y)))
def run(self, func, n_iter=1):
"""Execute ask() + tell() `n_iter` times"""
for _ in range(n_iter):
x = self.ask()
self.tell(x, func(x))
result = create_result(self.Xi, self.yi, self.space, self.rng,
models=self.models)
result.specs = self.specs
return result
def update_next(self):
"""Updates the value returned by opt.ask(). Useful if a parameter
was updated after ask was called."""
self.cache_ = {}
# Ask for a new next_x.
# We only need to overwrite _next_x if it exists.
if hasattr(self, '_next_x'):
opt = self.copy(random_state=self.rng)
self._next_x = opt._next_x
def get_result(self):
"""Returns the same result that would be returned by opt.tell()
but without calling tell
Returns
-------
res : `OptimizeResult`, scipy object
OptimizeResult instance with the required information.
"""
result = create_result(self.Xi, self.yi, self.space, self.rng,
models=self.models)
result.specs = self.specs
return result
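# --- Hedged usage sketch (illustration only, not part of the class above) ---
# Demonstrates the ask/tell loop described in the Optimizer docstring, plus a
# batched ask() using the constant-liar strategy. The search space and the toy
# objective below are placeholders.
def _optimizer_usage_example():
    opt = Optimizer(dimensions=[(-2.0, 2.0)], base_estimator="GP",
                    n_initial_points=5, acq_func="EI", random_state=0)
    def objective(x):
        return (x[0] - 0.3) ** 2  # toy 1-D objective
    # Sequential loop: ask for one point, evaluate it, tell the result back.
    for _ in range(10):
        x = opt.ask()
        opt.tell(x, objective(x))
    # Batched suggestion for parallel evaluation (constant-liar strategy).
    batch = opt.ask(n_points=4, strategy="cl_min")
    opt.tell(batch, [objective(x) for x in batch])
    return opt.get_result()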
|
|
from pyaccessories.TimeLog import Timer
import os
from RedmineAPI.RedmineAPI import RedmineInterface
from pyaccessories.SaveLoad import SaveLoad
from main import AutoSNVPhyl
import base64
import requests
# TODO documentation
class Run(object):
def main(self):
if self.first_run == 'yes':
choice = 'y'
else:
self.t.time_print("Would you like to set the redmine api key? (y/n)")
choice = input()
if choice == 'y':
self.t.time_print("Enter your redmine api key (will be encrypted to file)")
self.redmine_api_key = input()
# Encode and send to json file
self.loader.redmine_api_key_encrypted = self.encode(self.key, self.redmine_api_key).decode('utf-8')
self.loader.first_run = 'no'
self.loader.dump(self.config_json)
else:
# Import and decode from file
self.redmine_api_key = self.decode(self.key, self.redmine_api_key)
import re
if not re.match(r'^[a-z0-9]{40}$', self.redmine_api_key):
self.t.time_print("Invalid Redmine API key!")
exit(1)
self.redmine = RedmineInterface('http://redmine.biodiversity.agr.gc.ca/', self.redmine_api_key)
self.main_loop()
@staticmethod
def generate_args(inputs):
import argparse
args = argparse.Namespace()
args.reference = inputs['reference']
args.history_name = inputs['name']
args.noextract = False
args.manual = False # Change this to true if you want to manually run the snvphyl
return args
@staticmethod
def get_input(input_file, redmine_id):
mode = 'none'
regex = r"^(2\d{3}-\w{2,10}-\d{3,4})$"
inputs = {
'reference': None,
'fastqs': list(),
'name': str(redmine_id)
}
import re
for line in input_file:
# Check for mode changes
if line.lower().startswith('reference') and len(line) < len('reference') + 3:
mode = 'ref'
continue
elif line.lower().startswith('compare') and len(line) < len('compare') + 3:
mode = 'comp'
continue
elif line.lower() == '':
# Blank line
mode = 'none'
continue
if inputs['reference'] is not None and len(inputs['fastqs']) > 0 and mode == 'none':
# Finished gathering all input
break
# Get seq-id
if mode == 'ref':
if re.match(regex, line):
inputs['reference'] = line
else:
raise ValueError("Invalid seq-id \"%s\"" % line)
elif mode == 'comp':
if re.match(regex, line):
inputs['fastqs'].append(line)
else:
    raise ValueError("Invalid seq-id \"%s\"" % line)
if inputs['reference'] is None or len(inputs['fastqs']) < 1:
raise ValueError("Invalid format for redmine request.")
return inputs
def completed_response(self, result_path, redmine_id):
from RedmineAPI.RedmineAPI import RedmineUploadError
notes = "Completed running SNVPhyl. Results stored at %s" % os.path.join("NAS/bio_requests/%s" %
redmine_id)
try:
self.redmine.upload_file(result_path, redmine_id, 'application/zip',
file_name_once_uploaded="SNVPhyl_%s_Results.zip" % redmine_id)
except RedmineUploadError:
notes = "Couldn't upload your file to redmine. Results stored at %s" % \
os.path.join("NAS/bio_requests/%s" % redmine_id)
# Assign it back to the author
get = self.redmine.get_issue_data(redmine_id)
self.redmine.update_issue(redmine_id, notes, status_change=4, assign_to_id=get['issue']['author']['id'])
def run_snvphyl(self, inputs):
# Parse input
args = self.generate_args(inputs)
# noinspection PyBroadException
from main import AutoSNVPhylError
try:
runner = AutoSNVPhyl(args, inputs=inputs['fastqs'])
result_path = runner.run()
# SNVPhyl finished, copy the zip to the NAS
import shutil
bio_request_folder = os.path.join(self.nas_mnt, 'bio_requests', inputs['name'])
# Create folder with redmine id
self.t.time_print("Creating directory %s" % bio_request_folder)
if not os.path.exists(os.path.join(bio_request_folder)):
os.makedirs(bio_request_folder)
# Copy results to bio_request folder
self.t.time_print("Copying %s to %s" % (result_path, bio_request_folder))
shutil.copy(result_path, bio_request_folder)
# Respond on redmine
self.completed_response(result_path, inputs['name'])
except Exception as e:
import traceback
self.t.time_print("[Warning] AutoSNVPhyl had a problem, continuing redmine api anyways.")
self.t.time_print("[AutoSNVPhyl Error Dump]\n" + traceback.format_exc())
# Send response
if isinstance(e, (AutoSNVPhylError, ValueError)):
msg = str(e)
else:
msg = traceback.format_exc()
# Set it to feedback and assign it back to the author
get = self.redmine.get_issue_data(inputs['name'])
self.redmine.update_issue(
inputs['name'],
notes="There was a problem with your SNVPhyl. Please create a new issue on"
" Redmine to re-run it.\n%s" % msg,
status_change=4,
assign_to_id=get['issue']['author']['id']
)
def main_loop(self):
import time
while True:
self.clear_space()
self.make_call()
self.t.time_print("Waiting for next check.")
time.sleep(self.seconds_between_redmine_checks)
def clear_space(self):
from bioblend.galaxy import GalaxyInstance
from bioblend import ConnectionError
gi = GalaxyInstance(self.loader.get('ip', default='http://192.168.1.3:48888/'), key=self.loader.get('api_key'))
self.t.time_print("Clearing space on Galaxy")
while True:
try:
available = gi.histories.get_histories() # Ping galaxy
break
except ConnectionError as e:
if e.status_code == 403: # Invalid API key
self.t.time_print("Invalid Galaxy API Key!")
del self.loader.__dict__['api_key']
self.loader.dump()
self.loader.get('api_key')
elif 'Max retries exceeded' in str(e.args[0]):
self.t.time_print("Error: Galaxy isn't running/connection error.")
self.t.time_print("Waiting 1 hour...")
import time
time.sleep(3600)
else:
raise
if len(available) >= self.max_histories:
msg = 'Clearing data.'
else:
msg = 'Not clearing data.'
self.t.time_print("Currently %d histories on Galaxy. %s" % (len(available), msg))
while len(available) > self.max_histories:
self.t.time_print("Deleting history %s to clear space..." % available.pop(len(available)-1)['name'])
try:
gi.histories.delete_history(available[-1]['id'], purge=True)
except ConnectionError as e:
if e.status_code == 403: # Invalid API key
self.t.time_print("Invalid Galaxy API Key!")
exit(1)
elif 'Max retries exceeded' in str(e.args[0]):
self.t.time_print("Error: Galaxy isn't running/connection error.")
exit(1)
else:
raise
self.t.time_print("Finished clearing space")
def make_call(self):
self.t.time_print("Checking for SNVPhyl requests...")
data = self.redmine.get_new_issues('cfia')
found = []
for issue in data['issues']:
if issue['id'] not in self.responded_issues and issue['status']['name'] == 'New':
if issue['subject'].lower() == 'snvphyl':
found.append(issue)
self.t.time_print("Found %d issues..." % len(found))
while len(found) > 0: # While there are still issues to respond to
self.respond_to_issue(found.pop(len(found)-1))
def respond_to_issue(self, issue):
# Run snvphyl
self.t.time_print("Found SNVPhyl to run. Subject: %s. ID: %s" % (issue['subject'], issue['id']))
self.t.time_print("Adding to responded to")
self.responded_issues.add(issue['id'])
self.issue_loader.responded_issues = list(self.responded_issues)
self.issue_loader.dump()
# Turn the description into a list of lines
input_list = issue['description'].split('\n')
input_list = map(str.strip, input_list) # Get rid of \r
error = False
try:
inputs = self.get_input(input_list, issue['id'])
response = "Running SNVPhyl with reference %s\n\nComparing to:" % inputs['reference']
for fastq in list(inputs['fastqs']):
response += '\n' + fastq
if inputs['reference'] not in inputs['fastqs']:
response += "Did you mean to not compare the reference to itself?" # TODO ask for answer
except ValueError as e:
response = "Sorry, there was a problem with your SNVPhyl request:\n%s\n" \
"Please submit a new request and close this one." % e.args[0]
error = True
self.t.time_print('\n' + response)
if error: # If something went wrong set the status to feedback and assign the author the issue
get = self.redmine.get_issue_data(issue['id'])
self.redmine.update_issue(issue['id'], notes=response, status_change=4,
assign_to_id=get['issue']['author']['id'])
else:
# Set the issue to in progress since the SNVPhyl is running
self.redmine.update_issue(issue['id'], notes=response, status_change=2)
if error:
return
else:
self.run_snvphyl(inputs)
@staticmethod
def encode(key, string):
encoded_chars = []
for i in range(len(string)):
key_c = key[i % len(key)]
encoded_c = chr(ord(string[i]) + ord(key_c) % 256)
encoded_chars.append(encoded_c)
encoded_string = "".join(encoded_chars)
encoded_string = bytes(encoded_string, "utf-8")
return base64.urlsafe_b64encode(encoded_string)
@staticmethod
def decode(key, string):
decoded_chars = []
string = base64.urlsafe_b64decode(string).decode('utf-8')
for i in range(len(string)):
key_c = key[i % len(key)]
encoded_c = chr(abs(ord(str(string[i]))
- ord(key_c) % 256))
decoded_chars.append(encoded_c)
decoded_string = "".join(decoded_chars)
return decoded_string
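# Hedged worked example (not part of the original code): encode()/decode()
# above implement a simple per-character shift using the key (Vigenere-style),
# wrapped in urlsafe base64. With placeholder values,
#   encode('Sixteen byte key', 'abc') shifts 'a' by 'S', 'b' by 'i', 'c' by 'x'
#   and base64-encodes the shifted string; decode() reverses both steps and
#   recovers 'abc'.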
def __init__(self):
# import logging
# logging.basicConfig(level=logging.INFO)
# Vars
import sys
self.script_dir = sys.path[0]
self.config_json = os.path.join(self.script_dir, "config.json")
# Set up timer/logger
import datetime
if not os.path.exists(os.path.join(self.script_dir, 'runner_logs')):
os.makedirs(os.path.join(self.script_dir, 'runner_logs'))
self.t = Timer(log_file=os.path.join(self.script_dir, 'runner_logs',
datetime.datetime.now().strftime("%d-%m-%Y_%S:%M:%H")))
self.t.set_colour(30)
# Load issues that the bot has already responded to
self.issue_loader = SaveLoad(os.path.join(self.script_dir, 'responded_issues.json'), create=True)
self.responded_issues = set(self.issue_loader.get('responded_issues', default=[], ask=False))
# Get encrypted api key from config
# Load the config
self.loader = SaveLoad(self.config_json, create=True)
self.redmine_api_key = self.loader.get('redmine_api_key_encrypted', default='none', ask=False)
# If it's the first run then this will be yes
self.first_run = self.loader.get('first_run', default='yes', ask=False)
self.nas_mnt = os.path.normpath(self.loader.get('nasmnt', default="/mnt/nas/", get_type=str))
self.max_histories = self.loader.get('max_histories', default=6, get_type=int)
self.seconds_between_redmine_checks = (self.loader.get('seconds_between_redmine_checks', default=600, get_type=int))
# Make sure all the arguments are there
self.loader.get('workflow_id', default="f2db41e1fa331b3e")
self.loader.get('ip', default="http://192.168.1.3:48888/")
self.key = 'Sixteen byte key'
self.redmine = None
try:
self.main()
except Exception as e:
import traceback
self.t.time_print("[Error] Dumping...\n%s" % traceback.format_exc())
raise
if __name__ == "__main__":
Run()
|
|
import copy
import json
import multiprocessing
import os
import random
import shutil
import string
import tempfile
from contextlib import contextmanager
from os import chdir, getcwd, mkdir
from os.path import exists
import pkgpanda.build.constants
import pkgpanda.build.src_fetchers
from pkgpanda import expand_require as expand_require_exceptions
from pkgpanda import Install, PackageId, Repository
from pkgpanda.actions import add_package_file
from pkgpanda.constants import install_root, PKG_DIR, RESERVED_UNIT_NAMES
from pkgpanda.exceptions import FetchError, PackageError, ValidationError
from pkgpanda.subprocess import CalledProcessError, check_call, check_output
from pkgpanda.util import (check_forbidden_services, download_atomic,
hash_checkout, is_windows, load_json, load_string, logger,
make_directory, make_file, make_tar, remove_directory, rewrite_symlinks, write_json,
write_string)
class BuildError(Exception):
"""An error while building something."""
def __init__(self, msg: str):
self.msg = msg
def __str__(self):
return self.msg
class DockerCmd:
def __init__(self):
self.volumes = dict()
self.environment = dict()
self.container = str()
def run(self, name, cmd):
container_name = "{}-{}".format(
name, ''.join(
random.choice(string.ascii_lowercase) for _ in range(10)
)
)
docker = ["docker", "run", "--name={}".format(container_name)]
if is_windows:
# The default number of processes on Windows is 1, so bump it up to use all of them.
# The default memory allowed on Windows is 1GB. Some packages (mesos, for example)
# need about 3.5GB to compile a single file, so allow roughly 4GB per CPU.
numprocs = os.environ.get('NUMBER_OF_PROCESSORS')
docker += ["-m", "{0}gb".format(int(numprocs) * 4), "--cpu-count", numprocs]
for host_path, container_path in self.volumes.items():
docker += ["-v", "{0}:{1}".format(host_path, container_path)]
for k, v in self.environment.items():
docker += ["-e", "{0}={1}".format(k, v)]
docker.append(self.container)
docker += cmd
check_call(docker)
DockerCmd.clean(container_name)
@staticmethod
def clean(name):
"""Cleans up the specified container"""
check_call(["docker", "rm", "-v", name])
def get_variants_from_filesystem(directory, extension):
results = set()
for filename in os.listdir(directory):
# Skip things that don't end in the extension
if not filename.endswith(extension):
continue
variant = filename[:-len(extension)]
# Empty name variant shouldn't have a `.` following it
if variant == '.':
raise BuildError("Invalid filename {}. The \"default\" variant file should be just {}".format(
filename, extension))
# Empty / default variant is represented as 'None'.
if variant == '':
variant = None
else:
# Should be 'foo.' since we've removed the extension.
if variant[-1] != '.':
raise BuildError("Invalid variant filename {}. Expected a '.' separating the "
"variant name and extension '{}'.".format(filename, extension))
variant = variant[:-1]
results.add(variant)
return results
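# For illustration, with extension 'treeinfo.json':
#   'treeinfo.json'            -> None (the default variant)
#   'downstream.treeinfo.json' -> 'downstream'
#   'downstreamtreeinfo.json'  -> BuildError (missing the '.' separating variant and extension)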
def get_src_fetcher(src_info, cache_dir, working_directory):
try:
kind = src_info['kind']
if kind not in pkgpanda.build.src_fetchers.all_fetchers:
raise ValidationError("No known way to catch src with kind '{}'. Known kinds: {}".format(
kind,
pkgpanda.src_fetchers.all_fetchers.keys()))
args = {
'src_info': src_info,
'cache_dir': cache_dir
}
if src_info['kind'] in ['git_local', 'url', 'url_extract']:
args['working_directory'] = working_directory
return pkgpanda.build.src_fetchers.all_fetchers[kind](**args)
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
class TreeInfo:
ALLOWED_TREEINFO_KEYS = {'exclude', 'variants', 'core_package_list', 'bootstrap_package_list'}
def __init__(self, treeinfo_dict):
if not treeinfo_dict.keys() <= self.ALLOWED_TREEINFO_KEYS:
raise BuildError(
"treeinfo can only include the keys {}. Found {}".format(
self.ALLOWED_TREEINFO_KEYS, treeinfo_dict.keys()))
self.excludes = set(self._get_package_list(treeinfo_dict, 'exclude'))
self.core_package_list = set(self._get_package_list(treeinfo_dict, 'core_package_list', self.excludes))
self.bootstrap_package_list = set(self._get_package_list(
treeinfo_dict,
'bootstrap_package_list',
self.excludes))
# List of mandatory package variants to include in the buildinfo.
self.variants = treeinfo_dict.get('variants', dict())
if not isinstance(self.variants, dict):
raise BuildError("treeinfo variants must be a dictionary of package name to variant name")
@staticmethod
def _get_package_list(treeinfo_dict, key, excludes=None):
"""Return a list of package name strings from treeinfo_dict by key.
If key isn't present in treeinfo_dict, an empty list is returned.
"""
excludes = excludes or list()
package_list = treeinfo_dict.get(key, list())
# Validate package list.
if not isinstance(package_list, list):
raise BuildError("{} must be either null (meaning don't use) or a list of package names.".format(key))
for package_name in package_list:
if not isinstance(package_name, str):
raise BuildError("{} must be a list of strings. Found a {} with the value: {}".format(
key, type(package_name), package_name))
try:
PackageId.validate_name(package_name)
except ValidationError as ex:
raise BuildError("Invalid package name in {}: {}".format(key, package_name)) from ex
if package_name in excludes:
raise BuildError("Package found in both exclude and {}: {}".format(key, package_name))
return package_list
class PackageSet:
def __init__(self, variant, treeinfo, package_store):
self.variant = variant
self.all_packages = self.package_tuples_with_dependencies(
# If core_package_list is empty, default to all non-excluded packages.
treeinfo.core_package_list or (package_store.packages_by_name.keys() - treeinfo.excludes),
treeinfo,
package_store
)
self.validate_package_tuples(self.all_packages, treeinfo, package_store)
if treeinfo.bootstrap_package_list:
self.bootstrap_packages = self.package_tuples_with_dependencies(
treeinfo.bootstrap_package_list,
treeinfo,
package_store
)
self.validate_package_tuples(self.bootstrap_packages, treeinfo, package_store)
else:
self.bootstrap_packages = self.all_packages
# Validate bootstrap packages are a subset of all packages.
for package_name, variant in self.bootstrap_packages:
if (package_name, variant) not in self.all_packages:
raise BuildError("Bootstrap package {} (variant {}) not found in set of all packages".format(
package_name, pkgpanda.util.variant_name(variant)))
@staticmethod
def package_tuples_with_dependencies(package_names, treeinfo, package_store):
package_tuples = set((name, treeinfo.variants.get(name)) for name in set(package_names))
to_visit = list(package_tuples)
while to_visit:
package_tuple = to_visit.pop()
for require in package_store.get_buildinfo(*package_tuple)['requires']:
require_tuple = expand_require(require)
if require_tuple not in package_tuples:
to_visit.append(require_tuple)
package_tuples.add(require_tuple)
return package_tuples
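# The loop above is a simple worklist traversal: start from the requested
# (name, variant) tuples and keep adding each package's 'requires' until the
# set stops growing, so the result includes all transitive dependencies.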
@staticmethod
def validate_package_tuples(package_tuples, treeinfo, package_store):
# Validate that all packages have the variant specified in treeinfo.
print('package_tuples = %r' % package_tuples)
print('treeinfo = %r' % treeinfo.variants)
for package_name, variant in package_tuples:
treeinfo_variant = treeinfo.variants.get(package_name)
if variant != treeinfo_variant:
raise BuildError(
"package {} is supposed to have variant {} included in the tree according to the treeinfo, "
"but variant {} was found.".format(
package_name,
pkgpanda.util.variant_name(treeinfo_variant),
pkgpanda.util.variant_name(variant),
)
)
# Validate that all needed packages are built and not excluded by treeinfo.
for package_name, variant in package_tuples:
if (package_name, variant) not in package_store.packages:
raise BuildError(
"package {} variant {} is needed (explicitly requested or as a requires) "
"but is not in the set of built packages.".format(
package_name,
pkgpanda.util.variant_name(variant),
)
)
if package_name in treeinfo.excludes:
raise BuildError("package {} is needed (explicitly requested or as a requires) "
"but is excluded according to the treeinfo.json.".format(package_name))
class PackageStore:
def __init__(self, packages_dir, repository_url):
self._builders = {}
self._repository_url = repository_url.rstrip('/') if repository_url is not None else None
self._packages_dir = packages_dir.rstrip('/')
# Load all possible packages, making a dictionary from (name, variant) -> buildinfo
self._packages = dict()
self._packages_by_name = dict()
self._package_folders = dict()
# Load an upstream if one exists
# TODO(cmaloney): Allow upstreams to have upstreams
self._package_cache_dir = self._packages_dir + "/cache/packages"
self._upstream_dir = self._packages_dir + "/cache/upstream/checkout"
self._upstream = None
self._upstream_package_dir = self._upstream_dir + "/packages"
# TODO(cmaloney): Make it so the upstream directory can be kept around
remove_directory(self._upstream_dir)
upstream_config = self._packages_dir + '/upstream.json'
if os.path.exists(upstream_config):
try:
self._upstream = get_src_fetcher(
load_optional_json(upstream_config),
self._packages_dir + '/cache/upstream',
packages_dir)
self._upstream.checkout_to(self._upstream_dir)
if os.path.exists(self._upstream_package_dir + "/upstream.json"):
raise Exception("Support for upstreams which have upstreams is not currently implemented")
except Exception as ex:
raise BuildError("Error fetching upstream: {}".format(ex))
# Iterate through the packages directory finding all packages. Note this package dir comes
# first, then we ignore duplicate definitions of the same package
package_dirs = [self._packages_dir]
if self._upstream:
package_dirs.append(self._upstream_package_dir)
for directory in package_dirs:
for name in os.listdir(directory):
package_folder = directory + '/' + name
# Ignore files / non-directories
if not os.path.isdir(package_folder):
continue
# If we've already found this package, it means 1+ versions have been defined. Use
# those and ignore everything in the upstreams.
if name in self._packages_by_name:
continue
if is_windows:
builder_folder = os.path.join(directory, name, 'docker.windows')
else:
builder_folder = os.path.join(directory, name, 'docker')
if os.path.exists(builder_folder):
self._builders[name] = builder_folder
# Search the directory for buildinfo.json files, record the variants
for variant in get_variants_from_filesystem(package_folder, 'buildinfo.json'):
# Only adding the default dictionary once we know we have a package.
self._packages_by_name.setdefault(name, dict())
buildinfo = load_buildinfo(package_folder, variant)
self._packages[(name, variant)] = buildinfo
self._packages_by_name[name][variant] = buildinfo
if name in self._package_folders:
assert self._package_folders[name] == package_folder
else:
self._package_folders[name] = package_folder
def get_package_folder(self, name):
return self._package_folders[name]
def get_bootstrap_cache_dir(self):
return self._packages_dir + "/cache/bootstrap"
def get_complete_cache_dir(self):
return self._packages_dir + "/cache/complete"
def get_buildinfo(self, name, variant):
return self._packages[(name, variant)]
def get_last_complete_set(self, variants):
def get_last_complete(variant):
complete_latest = (
self.get_complete_cache_dir() + '/' + pkgpanda.util.variant_prefix(variant) + 'complete.latest.json')
if not os.path.exists(complete_latest):
raise BuildError("No last complete found for variant {}. Expected to find {} to match "
"{}".format(pkgpanda.util.variant_name(variant), complete_latest,
pkgpanda.util.variant_prefix(variant) + 'treeinfo.json'))
return load_json(complete_latest)
result = {}
if variants is None:
# Get all defined variants.
requested_variants = self.list_trees()
else:
requested_variants = variants
for variant in requested_variants:
result[variant] = get_last_complete(variant)
return result
def get_last_build_filename(self, name, variant):
return self.get_package_cache_folder(name) + '/{}latest'.format(pkgpanda.util.variant_prefix(variant))
def get_package_path(self, pkg_id):
return self.get_package_cache_folder(pkg_id.name) + '/{}.tar.xz'.format(pkg_id)
def get_package_cache_folder(self, name):
directory = self._package_cache_dir + '/' + name
make_directory(directory)
return directory
def list_trees(self):
return get_variants_from_filesystem(self._packages_dir, 'treeinfo.json')
def get_package_set(self, variant):
return PackageSet(variant, TreeInfo(load_config_variant(self._packages_dir, variant, 'treeinfo.json')), self)
def get_all_package_sets(self):
return [self.get_package_set(variant) for variant in sorted(self.list_trees(), key=pkgpanda.util.variant_str)]
@property
def packages(self):
return self._packages
@property
def builders(self):
return self._builders.copy()
@property
def packages_by_name(self):
return self._packages_by_name
@property
def packages_dir(self):
return self._packages_dir
def try_fetch_by_id(self, pkg_id: PackageId):
if self._repository_url is None:
return False
# TODO(cmaloney): Use storage providers to download instead of open coding.
pkg_path = "{}.tar.xz".format(pkg_id)
url = self._repository_url + '/packages/{0}/{1}'.format(pkg_id.name, pkg_path)
try:
directory = self.get_package_cache_folder(pkg_id.name)
# TODO(cmaloney): Move to some sort of logging mechanism?
print("Attempting to download", pkg_id, "from", url, "to", directory)
download_atomic(directory + '/' + pkg_path, url, directory)
assert os.path.exists(directory + '/' + pkg_path)
return directory + '/' + pkg_path
except FetchError:
return False
def try_fetch_bootstrap_and_active(self, bootstrap_id):
if self._repository_url is None:
return False
try:
bootstrap_name = '{}.bootstrap.tar.xz'.format(bootstrap_id)
active_name = '{}.active.json'.format(bootstrap_id)
# TODO(cmaloney): Use storage providers to download instead of open coding.
bootstrap_url = self._repository_url + '/bootstrap/' + bootstrap_name
active_url = self._repository_url + '/bootstrap/' + active_name
print("Attempting to download", bootstrap_name, "from", bootstrap_url)
dest_dir = self.get_bootstrap_cache_dir()
# Normalize to no trailing slash for repository_url
download_atomic(dest_dir + '/' + bootstrap_name, bootstrap_url, self._packages_dir)
print("Attempting to download", active_name, "from", active_url)
download_atomic(dest_dir + '/' + active_name, active_url, self._packages_dir)
return True
except FetchError:
return False
def expand_require(require):
try:
return expand_require_exceptions(require)
except ValidationError as ex:
raise BuildError(str(ex)) from ex
def get_docker_id(docker_name):
return check_output(["docker", "inspect", "-f", "{{ .Id }}", docker_name]).decode('utf-8').strip()
def hash_files_in_folder(directory):
"""Given a relative path, hashes all files inside that folder and subfolders
Returns a dictionary from filename to the hash of that file. If that whole
dictionary is hashed, you get a hash of all the contents of the folder.
This is split out from calculating the whole folder hash so that the
behavior in different walking corner cases can be more easily tested.
"""
assert not directory.startswith('/'), \
"For the hash to be reproducible on other machines relative paths must always be used. " \
"Got path: {}".format(directory)
directory = directory.rstrip('/')
file_hash_dict = {}
# TODO(cmaloney): Disallow symlinks as they're hard to hash, people can symlink / copy in their
# build steps if needed.
for root, dirs, filenames in os.walk(directory):
assert not root.startswith('/')
for name in filenames:
path = root + '/' + name
base = path[len(directory) + 1:]
file_hash_dict[base] = pkgpanda.util.sha1(path)
# If the directory has files or folders inside of it, it will be picked up implicitly
# through them. If it contains nothing it wouldn't be picked up at all, but its existence
# is important, so add it with a value its hash can never be via sha1 (the empty string).
if len(filenames) == 0 and len(dirs) == 0:
path = root[len(directory) + 1:]
# Empty path means it is the root directory, in which case we want no entries, not a
# single entry "": ""
if path:
file_hash_dict[root[len(directory) + 1:]] = ""
return file_hash_dict
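# For illustration, hashing a folder laid out as
#   pkg/
#     build
#     extra/patches/fix.patch
# yields something like {'build': '<sha1>', 'extra/patches/fix.patch': '<sha1>'},
# with keys always relative to the directory that was passed in.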
@contextmanager
def as_cwd(path):
start_dir = getcwd()
chdir(path)
yield
chdir(start_dir)
def hash_folder_abs(directory, work_dir):
assert directory.startswith(work_dir), "directory must be inside work_dir: {} {}".format(directory, work_dir)
assert not work_dir[-1] == '/', "This code assumes no trailing slash on the work_dir"
with as_cwd(work_dir):
return hash_folder(directory[len(work_dir) + 1:])
def hash_folder(directory):
return hash_checkout(hash_files_in_folder(directory))
# Try to read json from the given file. If it is an empty file, then return an
# empty json dictionary.
def load_optional_json(filename):
try:
with open(filename) as f:
text = f.read().strip()
if text:
return json.loads(text)
return {}
except OSError as ex:
raise BuildError("Failed to open JSON file {}: {}".format(filename, ex))
except ValueError as ex:
raise BuildError("Unable to parse json in {}: {}".format(filename, ex))
def load_config_variant(directory, variant, extension):
assert directory[-1] != '/'
return load_optional_json(directory + '/' + pkgpanda.util.variant_prefix(variant) + extension)
def load_buildinfo(path, variant):
buildinfo = load_config_variant(path, variant, 'buildinfo.json')
# Fill in default / guaranteed members so code everywhere doesn't have to guard around it.
default_build_script = 'build'
if is_windows:
default_build_script = 'build.ps1'
buildinfo.setdefault('build_script', pkgpanda.util.variant_prefix(variant) + default_build_script)
buildinfo.setdefault('docker', 'dcos/dcos-builder:dcos-builder_dockerdir-latest')
buildinfo.setdefault('environment', dict())
buildinfo.setdefault('requires', list())
buildinfo.setdefault('state_directory', False)
return buildinfo
def make_bootstrap_tarball(package_store, packages, variant):
# Convert filenames to package ids
pkg_ids = list()
for pkg_path in packages:
# Get the package id from the given package path
filename = os.path.basename(pkg_path)
if not filename.endswith(".tar.xz"):
raise BuildError("Packages must be packaged / end with a .tar.xz. Got {}".format(filename))
pkg_id = filename[:-len(".tar.xz")]
pkg_ids.append(pkg_id)
bootstrap_cache_dir = package_store.get_bootstrap_cache_dir()
# Filename is output_name.<sha-1>.{active.json|.bootstrap.tar.xz}
bootstrap_id = hash_checkout(pkg_ids)
latest_name = "{}/{}bootstrap.latest".format(bootstrap_cache_dir, pkgpanda.util.variant_prefix(variant))
output_name = bootstrap_cache_dir + '/' + bootstrap_id + '.'
# bootstrap tarball = <sha1 of packages in tarball>.bootstrap.tar.xz
bootstrap_name = "{}bootstrap.tar.xz".format(output_name)
active_name = "{}active.json".format(output_name)
def mark_latest():
# Ensure latest is always written
write_string(latest_name, bootstrap_id)
print("bootstrap: {}".format(bootstrap_name))
print("active: {}".format(active_name))
print("latest: {}".format(latest_name))
return bootstrap_id
if os.path.exists(bootstrap_name):
print("Bootstrap already up to date, not recreating")
return mark_latest()
make_directory(bootstrap_cache_dir)
# Try downloading.
if package_store.try_fetch_bootstrap_and_active(bootstrap_id):
print("Bootstrap already up to date, Not recreating. Downloaded from repository-url.")
return mark_latest()
print("Unable to download from cache. Building.")
print("Creating bootstrap tarball for variant {}".format(variant))
work_dir = tempfile.mkdtemp(prefix='mkpanda_bootstrap_tmp')
def make_abs(path):
return os.path.join(work_dir, path)
pkgpanda_root = make_abs("opt/mesosphere")
repository = Repository(os.path.join(pkgpanda_root, "packages"))
# Fetch all the packages to the root
for pkg_path in packages:
filename = os.path.basename(pkg_path)
pkg_id = filename[:-len(".tar.xz")]
def local_fetcher(id, target):
shutil.unpack_archive(pkg_path, target, "gztar")
repository.add(local_fetcher, pkg_id, False)
# Activate the packages inside the repository.
# Do generate dcos.target.wants inside the root so that we don't
# try messing with /etc/systemd/system.
install = Install(
root=pkgpanda_root,
config_dir=None,
rooted_systemd=True,
manage_systemd=False,
block_systemd=True,
fake_path=True,
skip_systemd_dirs=True,
manage_users=False,
manage_state_dir=False)
install.activate(repository.load_packages(pkg_ids))
# Mark the tarball as a bootstrap tarball/filesystem so that
# dcos-setup.service will fire.
make_file(make_abs("opt/mesosphere/bootstrap"))
# Write out an active.json for the bootstrap tarball
write_json(active_name, pkg_ids)
# Rewrite all the symlinks to point to /opt/mesosphere
rewrite_symlinks(work_dir, work_dir, "/")
make_tar(bootstrap_name, pkgpanda_root)
remove_directory(work_dir)
# Update latest last so that we don't ever use partially-built things.
write_string(latest_name, bootstrap_id)
print("Built bootstrap")
return mark_latest()
def build_tree_variants(package_store, mkbootstrap):
""" Builds all possible tree variants in a given package store
"""
result = dict()
tree_variants = get_variants_from_filesystem(package_store.packages_dir, 'treeinfo.json')
if len(tree_variants) == 0:
raise Exception('No treeinfo.json can be found in {}'.format(package_store.packages_dir))
for variant in tree_variants:
result[variant] = pkgpanda.build.build_tree(package_store, mkbootstrap, variant)
return result
def build_tree(package_store, mkbootstrap, tree_variants):
"""Build packages and bootstrap tarballs for one or all tree variants.
Returns a dict mapping tree variants to bootstrap IDs.
If tree_variants is None, builds all available tree variants.
"""
# TODO(cmaloney): Add support for circular dependencies. They are doable as
# long as there is a pre-built version of enough of the packages.
# TODO(cmaloney): Make it so when we're building a treeinfo which has an
# explicit package list we don't build all the other packages.
build_order = list()
visited = set()
built = set()
def visit(pkg_tuple: tuple):
"""Add a package and its requires to the build order.
Raises AssertionError if pkg_tuple is in the set of visited packages.
If the package has any requires, they're recursively visited and added
to the build order depth-first. Then the package itself is added.
"""
# Visit the node for the first (and only) time.
assert pkg_tuple not in visited
visited.add(pkg_tuple)
# Ensure all dependencies are built. Sorted for stability.
# Requirements may be either strings or dicts, so we convert them all to (name, variant) tuples before sorting.
for require_tuple in sorted(expand_require(r) for r in package_store.packages[pkg_tuple]['requires']):
# If the dependency has already been built, we can move on.
if require_tuple in built:
continue
# If the dependency has not been built but has been visited, then
# there's a cycle in the dependency graph.
if require_tuple in visited:
raise BuildError("Circular dependency. Circular link {0} -> {1}".format(pkg_tuple, require_tuple))
if PackageId.is_id(require_tuple[0]):
raise BuildError("Depending on a specific package id is not supported. Package {} "
"depends on {}".format(pkg_tuple, require_tuple))
if require_tuple not in package_store.packages:
raise BuildError("Package {0} require {1} not buildable from tree.".format(pkg_tuple, require_tuple))
# Add the dependency (after its dependencies, if any) to the build
# order.
visit(require_tuple)
build_order.append(pkg_tuple)
built.add(pkg_tuple)
# Can't compare None to str while sorting, so sort on a tuple of
# (name, variant is None, variant or "") so the default (None) variant still gets a stable order.
def key_func(elem):
return elem[0], elem[1] is None, elem[1] or ""
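# e.g. ('pkg', None) sorts with key ('pkg', True, '') while ('pkg', 'foo') sorts
# with ('pkg', False, 'foo'), so named variants of a package sort ahead of its
# default variant without ever comparing None to a str.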
def visit_packages(package_tuples):
for pkg_tuple in sorted(package_tuples, key=key_func):
if pkg_tuple in visited:
continue
visit(pkg_tuple)
if tree_variants:
package_sets = [package_store.get_package_set(v) for v in tree_variants]
else:
package_sets = package_store.get_all_package_sets()
with logger.scope("resolve package graph"):
# Build all required packages for all tree variants.
for package_set in package_sets:
visit_packages(package_set.all_packages)
built_packages = dict()
for (name, variant) in build_order:
built_packages.setdefault(name, dict())
# Run the build, store the built package path for later use.
# TODO(cmaloney): Only build the requested variants, rather than all variants.
built_packages[name][variant] = build(
package_store,
name,
variant,
True)
# Build bootstrap tarballs for all tree variants.
def make_bootstrap(package_set):
with logger.scope("Making bootstrap variant: {}".format(pkgpanda.util.variant_name(package_set.variant))):
package_paths = list()
for name, pkg_variant in package_set.bootstrap_packages:
package_paths.append(built_packages[name][pkg_variant])
if mkbootstrap:
return make_bootstrap_tarball(
package_store,
list(sorted(package_paths)),
package_set.variant)
# Build bootstraps and package lists for all variants.
# TODO(cmaloney): Allow distinguishing between "build all" and "build the default one".
complete_cache_dir = package_store.get_complete_cache_dir()
make_directory(complete_cache_dir)
results = {}
for package_set in package_sets:
info = {
'bootstrap': make_bootstrap(package_set),
'packages': sorted(
load_string(package_store.get_last_build_filename(*pkg_tuple))
for pkg_tuple in package_set.all_packages)}
write_json(
complete_cache_dir + '/' + pkgpanda.util.variant_prefix(package_set.variant) + 'complete.latest.json',
info)
results[package_set.variant] = info
return results
def assert_no_duplicate_keys(lhs, rhs):
if len(lhs.keys() & rhs.keys()) != 0:
print("ASSERTION FAILED: Duplicate keys between {} and {}".format(lhs, rhs))
assert len(lhs.keys() & rhs.keys()) == 0
# Find all build variants and build them
def build_package_variants(package_store, name, clean_after_build=True, recursive=False):
# Find the packages dir / root of the packages tree, and create a PackageStore
results = dict()
for variant in package_store.packages_by_name[name].keys():
results[variant] = build(
package_store,
name,
variant,
clean_after_build=clean_after_build,
recursive=recursive)
return results
class IdBuilder():
def __init__(self, buildinfo):
self._start_keys = set(buildinfo.keys())
self._buildinfo = copy.deepcopy(buildinfo)
self._taken = set()
def _check_no_key(self, field):
if field in self._buildinfo:
raise BuildError("Key {} shouldn't be in buildinfo, but was".format(field))
def add(self, field, value):
self._check_no_key(field)
self._buildinfo[field] = value
def has(self, field):
return field in self._buildinfo
def take(self, field):
self._taken.add(field)
return self._buildinfo[field]
def replace(self, taken_field, new_field, new_value):
assert taken_field in self._buildinfo
self._check_no_key(new_field)
del self._buildinfo[taken_field]
self._buildinfo[new_field] = new_value
self._taken.add(new_field)
def update(self, field, new_value):
assert field in self._buildinfo
self._buildinfo[field] = new_value
def get_build_ids(self):
# If any keys are left in the buildinfo, error that there were unused keys
remaining_keys = self._start_keys - self._taken
if remaining_keys:
raise BuildError("ERROR: Unknown keys {} in buildinfo.json".format(remaining_keys))
return self._buildinfo
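# IdBuilder tracks which buildinfo.json keys have been consumed: values are read
# with take()/replace(), and get_build_ids() raises if any of the original keys
# was never taken, which surfaces typos and unknown fields in buildinfo.json.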
def build(package_store: PackageStore, name: str, variant, clean_after_build, recursive=False):
msg = "Building package {} variant {}".format(name, pkgpanda.util.variant_name(variant))
with logger.scope(msg):
return _build(package_store, name, variant, clean_after_build, recursive)
def _build(package_store, name, variant, clean_after_build, recursive):
assert isinstance(package_store, PackageStore)
tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
repository = Repository(tmpdir.name)
package_dir = package_store.get_package_folder(name)
def src_abs(name):
return package_dir + '/' + name
def cache_abs(filename):
return package_store.get_package_cache_folder(name) + '/' + filename
# Build pkginfo over time, translating fields from buildinfo.
pkginfo = {}
# Build up the docker command arguments over time, translating fields as needed.
cmd = DockerCmd()
assert (name, variant) in package_store.packages, \
"Programming error: name, variant should have been validated to be valid before calling build()."
builder = IdBuilder(package_store.get_buildinfo(name, variant))
final_buildinfo = dict()
builder.add('name', name)
builder.add('variant', pkgpanda.util.variant_str(variant))
# Convert single_source -> sources
if builder.has('sources'):
if builder.has('single_source'):
raise BuildError('Both sources and single_source cannot be specified at the same time')
sources = builder.take('sources')
elif builder.has('single_source'):
sources = {name: builder.take('single_source')}
builder.replace('single_source', 'sources', sources)
else:
builder.add('sources', {})
sources = dict()
print("NOTICE: No sources specified")
final_buildinfo['sources'] = sources
# Construct the source fetchers, gather the checkout ids from them
checkout_ids = dict()
fetchers = dict()
try:
for src_name, src_info in sorted(sources.items()):
# TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
cache_dir = package_store.get_package_cache_folder(name) + '/' + src_name
make_directory(cache_dir)
fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
fetchers[src_name] = fetcher
checkout_ids[src_name] = fetcher.get_id()
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
for src_name, checkout_id in checkout_ids.items():
# NOTE: single_source buildinfo was expanded above so the src_name is
# always correct here.
# Make sure we never accidentally overwrite something which might be
# important. Fields should match if specified (and that should be
# tested at some point). For now, disallowing duplicate keys saves hassle.
assert_no_duplicate_keys(checkout_id, final_buildinfo['sources'][src_name])
final_buildinfo['sources'][src_name].update(checkout_id)
# Add the sha1 of the buildinfo.json + build file to the build ids
builder.update('sources', checkout_ids)
build_script_file = builder.take('build_script')
# TODO(cmaloney): Change dest name to build_script_sha1
builder.replace('build_script', 'build', pkgpanda.util.sha1(src_abs(build_script_file)))
builder.add('pkgpanda_version', pkgpanda.build.constants.version)
extra_dir = src_abs("extra")
# Add the "extra" folder inside the package as an additional source if it
# exists
if os.path.exists(extra_dir):
extra_id = hash_folder_abs(extra_dir, package_dir)
builder.add('extra_source', extra_id)
final_buildinfo['extra_source'] = extra_id
# Figure out the docker name.
docker_name = builder.take('docker')
cmd.container = docker_name
# Add the id of the docker build environment to the build_ids.
try:
docker_id = get_docker_id(docker_name)
except CalledProcessError:
# docker pull the container and try again
check_call(['docker', 'pull', docker_name])
docker_id = get_docker_id(docker_name)
builder.update('docker', docker_id)
# TODO(cmaloney): The environment variables should be generated during build
# not live in buildinfo.json.
pkginfo['environment'] = builder.take('environment')
# Whether pkgpanda should make sure a `/var/lib` state directory is available on the host
pkginfo['state_directory'] = builder.take('state_directory')
if pkginfo['state_directory'] not in [True, False]:
raise BuildError("state_directory in buildinfo.json must be a boolean `true` or `false`")
username = None
if builder.has('username'):
username = builder.take('username')
if not isinstance(username, str):
raise BuildError("username in buildinfo.json must be either not set (no user for this"
" package), or a user name string")
try:
pkgpanda.UserManagement.validate_username(username)
except ValidationError as ex:
raise BuildError("username in buildinfo.json didn't meet the validation rules. {}".format(ex))
pkginfo['username'] = username
group = None
if builder.has('group'):
group = builder.take('group')
if not isinstance(group, str):
raise BuildError("group in buildinfo.json must be either not set (use default group for this user)"
", or group must be a string")
try:
pkgpanda.UserManagement.validate_group_name(group)
except ValidationError as ex:
raise BuildError("group in buildinfo.json didn't meet the validation rules. {}".format(ex))
pkginfo['group'] = group
# Packages need directories inside the fake install root (otherwise docker
# will try making the directories on a readonly filesystem), so build the
# install root now, and make the package directories in it as we go.
install_dir = tempfile.mkdtemp(prefix="pkgpanda-")
active_packages = list()
active_package_ids = set()
active_package_variants = dict()
auto_deps = set()
# Final package has the same requires as the build.
requires = builder.take('requires')
pkginfo['requires'] = requires
if builder.has("sysctl"):
pkginfo["sysctl"] = builder.take("sysctl")
# TODO(cmaloney): Pull generating the full set of requires out into a function.
to_check = copy.deepcopy(requires)
if not isinstance(to_check, list):
raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
while to_check:
requires_info = to_check.pop(0)
requires_name, requires_variant = expand_require(requires_info)
if requires_name in active_package_variants:
# TODO(cmaloney): If one package depends on the <default>
# variant of a package and 1+ others depends on a non-<default>
# variant then update the dependency to the non-default variant
# rather than erroring.
if requires_variant != active_package_variants[requires_name]:
# TODO(cmaloney): Make this contain the chains of
# dependencies which contain the conflicting packages.
# a -> b -> c -> d {foo}
# e {bar} -> d {baz}
raise BuildError(
"Dependncy on multiple variants of the same package {}. variants: {} {}".format(
requires_name,
requires_variant,
active_package_variants[requires_name]))
# The package {requires_name, variant} is already a dependency;
# don't process it again / move on to the next.
continue
active_package_variants[requires_name] = requires_variant
# Figure out the last build of the dependency, add that as the
# fully expanded dependency.
requires_last_build = package_store.get_last_build_filename(requires_name, requires_variant)
if not os.path.exists(requires_last_build):
if recursive:
# Build the dependency
build(package_store, requires_name, requires_variant, clean_after_build, recursive)
else:
raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
"the dependency".format(requires_name, requires_variant))
try:
pkg_id_str = load_string(requires_last_build)
auto_deps.add(pkg_id_str)
pkg_buildinfo = package_store.get_buildinfo(requires_name, requires_variant)
pkg_requires = pkg_buildinfo['requires']
pkg_path = repository.package_path(pkg_id_str)
pkg_tar = pkg_id_str + '.tar.xz'
if not os.path.exists(package_store.get_package_cache_folder(requires_name) + '/' + pkg_tar):
raise BuildError(
"The build tarball {} refered to by the last_build file of the dependency {} "
"variant {} doesn't exist. Rebuild the dependency.".format(
pkg_tar,
requires_name,
requires_variant))
active_package_ids.add(pkg_id_str)
# Mount the package into the docker container.
cmd.volumes[pkg_path] = install_root + "/packages/{}:ro".format(pkg_id_str)
os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))
# Add the dependencies of the package to the set which will be
# activated.
# TODO(cmaloney): All these 'transitive' dependencies shouldn't
# be available to the package being built, only what depends on
# them directly.
to_check += pkg_requires
except ValidationError as ex:
raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
except PackageError as ex:
raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
# Add requires to the package id, calculate the final package id.
# NOTE: active_packages isn't fully constructed here since we lazily load
# packages not already in the repository.
builder.update('requires', list(active_package_ids))
version_extra = None
if builder.has('version_extra'):
version_extra = builder.take('version_extra')
build_ids = builder.get_build_ids()
version_base = hash_checkout(build_ids)
version = None
if builder.has('version_extra'):
version = "{0}-{1}".format(version_extra, version_base)
else:
version = version_base
pkg_id = PackageId.from_parts(name, version)
# Everything must have been extracted by now. If it wasn't, then we just
# had a hard error that it was set but not used, and it wasn't included
# in the calculation of the PackageId.
builder = None
# Save the build_ids. Useful for verifying exactly what went into the
# package build hash.
final_buildinfo['build_ids'] = build_ids
final_buildinfo['package_version'] = version
# Save the package name and variant. The variant is used when installing
# packages to validate dependencies.
final_buildinfo['name'] = name
final_buildinfo['variant'] = variant
# If the package is already built, don't do anything.
pkg_path = package_store.get_package_cache_folder(name) + '/{}.tar.xz'.format(pkg_id)
# Done if it exists locally
if exists(pkg_path):
print("Package up to date. Not re-building.")
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
return pkg_path
# Try downloading.
dl_path = package_store.try_fetch_by_id(pkg_id)
if dl_path:
print("Package up to date. Not re-building. Downloaded from repository-url.")
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
print(dl_path, pkg_path)
assert dl_path == pkg_path
return pkg_path
# Fall out and do the build since it couldn't be downloaded
print("Unable to download from cache. Proceeding to build")
print("Building package {} with buildinfo: {}".format(
pkg_id,
json.dumps(final_buildinfo, indent=2, sort_keys=True)))
# Clean out src, result so later steps can use them freely for building.
def clean():
# Run a docker container to remove src/ and result/
cmd = DockerCmd()
cmd.volumes = {
package_store.get_package_cache_folder(name): PKG_DIR + "/:rw",
}
if is_windows:
cmd.container = "microsoft/windowsservercore:1709"
filename = PKG_DIR + "\\src"
cmd.run("package-cleaner",
["cmd.exe", "/c", "if", "exist", filename, "rmdir", "/s", "/q", filename])
filename = PKG_DIR + "\\result"
cmd.run("package-cleaner",
["cmd.exe", "/c", "if", "exist", filename, "rmdir", "/s", "/q", filename])
else:
cmd.container = "ubuntu:14.04.4"
cmd.run("package-cleaner", ["rm", "-rf", PKG_DIR + "/src", PKG_DIR + "/result"])
clean()
# Only fresh builds are allowed which don't overlap existing artifacts.
result_dir = cache_abs("result")
if exists(result_dir):
raise BuildError("result folder must not exist. It will be made when the package is "
"built. {}".format(result_dir))
# 'mkpanda add' all implicit dependencies since we actually need to build.
for dep in auto_deps:
print("Auto-adding dependency: {}".format(dep))
# NOTE: Not using the name pkg_id because that overrides the outer one.
id_obj = PackageId(dep)
add_package_file(repository, package_store.get_package_path(id_obj))
package = repository.load(dep)
active_packages.append(package)
# Check out all the sources into their respective 'src/' folders.
try:
src_dir = cache_abs('src')
if os.path.exists(src_dir):
raise ValidationError(
"'src' directory already exists, did you have a previous build? " +
"Currently all builds must be from scratch. Support should be " +
"added for re-using a src directory when possible. src={}".format(src_dir))
os.mkdir(src_dir)
for src_name, fetcher in sorted(fetchers.items()):
root = cache_abs('src/' + src_name)
os.mkdir(root)
fetcher.checkout_to(root)
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
# Activate the packages so that we have a proper path, environment
# variables.
# TODO(cmaloney): RAII type thing for the temporary directory so if we
# don't get all the way through things will be cleaned up?
install = Install(
root=install_dir,
config_dir=None,
rooted_systemd=True,
manage_systemd=False,
block_systemd=True,
fake_path=True,
manage_users=False,
manage_state_dir=False)
install.activate(active_packages)
# Rewrite all the symlinks inside the active path because we will
# be mounting the folder into a docker container, and the absolute
# paths to the packages will change.
# TODO(cmaloney): This isn't very clean, it would be much nicer to
# just run pkgpanda inside the package.
rewrite_symlinks(install_dir, repository.path, install_root + "/packages/")
print("Building package in docker")
# TODO(cmaloney): Run as a specific non-root user, make it possible
# for non-root to cleanup afterwards.
# Run the build, prepping the environment as necessary.
mkdir(cache_abs("result"))
# Copy the build info to the resulting tarball
write_json(cache_abs("src/buildinfo.full.json"), final_buildinfo)
write_json(cache_abs("result/buildinfo.full.json"), final_buildinfo)
write_json(cache_abs("result/pkginfo.json"), pkginfo)
# Make the folder for the package we are building. If docker does it, it
# gets auto-created with root permissions and we can't actually delete it.
os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))
# TODO(cmaloney): Disallow writing to well-known files and directories?
# Source we checked out
cmd.volumes.update({
# TODO(cmaloney): src should be read only...
# Source directory
cache_abs("src"): PKG_DIR + "/src:rw",
# Getting the result out
cache_abs("result"): install_root + "/packages/{}:rw".format(pkg_id),
# The build script directory
package_dir: PKG_DIR + "/build:ro"
})
if is_windows:
cmd.volumes.update({
# TODO: This is a temporary workaround until Windows RS4, which has a fix
# that allows overlapping mount directories, comes out. We should not make this
# also happen on Linux as it would probably break a bunch of stuff unnecessarily
# that would only need to be undone in the future.
install_dir: install_root + "/install_dir:ro"
})
else:
cmd.volumes.update({
install_dir: install_root + ":ro"
})
if os.path.exists(extra_dir):
cmd.volumes[extra_dir] = PKG_DIR + "/extra:ro"
cmd.environment = {
"PKG_VERSION": version,
"PKG_NAME": name,
"PKG_ID": pkg_id,
"PKG_PATH": install_root + "/packages/{}".format(pkg_id),
"PKG_VARIANT": variant if variant is not None else "<default>",
"NUM_CORES": multiprocessing.cpu_count()
}
try:
# TODO(cmaloney): Run a wrapper which sources
# /opt/mesosphere/environment then runs a build. Also should fix
# ownership of /opt/mesosphere/packages/{pkg_id} post build.
command = [PKG_DIR + "/build/" + build_script_file]
cmd.run("package-builder", command)
except CalledProcessError as ex:
raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))
# Clean up the temporary install dir used for dependencies.
# TODO(cmaloney): Move to an RAII wrapper.
remove_directory(install_dir)
with logger.scope("Build package tarball"):
# Check for forbidden services before packaging the tarball:
try:
check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
except ValidationError as ex:
raise BuildError("Package validation failed: {}".format(ex))
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
# Bundle the artifacts into the pkgpanda package
tmp_name = pkg_path + "-tmp.tar.xz"
make_tar(tmp_name, cache_abs("result"))
os.replace(tmp_name, pkg_path)
print("Package built.")
if clean_after_build:
clean()
return pkg_path
|
|
# ------------------------------------------------------------------------------
# binstruct unit testing
#
# Thomas Bonim (thomas.bonim@googlemail.com)
# This code is in the public domain
# ------------------------------------------------------------------------------
import unittest
import binstruct
class TestModule(unittest.TestCase):
def test_error(self):
with self.assertRaises(binstruct.error):
raise binstruct.error('test')
class TestObject(unittest.TestCase):
def setUp(self):
self.data = b"\x11\x22\x00\x44\x55"
self.obj = binstruct.Binstruct(self.data)
def test_data(self):
self.assertIs(self.obj.data, self.data)
self.assertEqual(self.obj.pos, 0)
with self.assertRaises(AttributeError):
self.obj.data = "other"
def test_pos(self):
self.assertEqual(self.obj.pos, 0)
self.assertIsInstance(self.obj.pos, int)
self.obj.pos = 1
self.assertEqual(self.obj.pos, 1)
self.assertIsInstance(self.obj.pos, int)
self.obj.pos = 2L
self.assertEqual(self.obj.pos, 2)
self.assertIsInstance(self.obj.pos, int)
self.obj.pos += 1
self.assertEqual(self.obj.pos, 3)
self.assertIsInstance(self.obj.pos, int)
self.obj.pos += 1L
self.assertEqual(self.obj.pos, 4)
self.assertIsInstance(self.obj.pos, int)
with self.assertRaises(TypeError):
self.obj.pos = None
with self.assertRaises(TypeError):
del self.obj.pos
with self.assertRaises(binstruct.error):
self.obj.pos = -1
with self.assertRaises(binstruct.error):
self.obj.pos = 6
def test_init(self):
data = 'other'
self.obj.pos += 1
self.obj.__init__(data)
self.assertIs(self.obj.data, data)
self.assertEqual(self.obj.pos, 0)
class TestSubclass(unittest.TestCase):
class Subclass(binstruct.Binstruct):
def __init__(self, data, ext):
super(TestSubclass.Subclass, self).__init__(data)
self.ext = ext
def method(self):
self.pos += 1
def test_subclass(self):
obj = TestSubclass.Subclass(b"\x11\x22", 0)
self.assertEqual(obj.data, b"\x11\x22")
self.assertEqual(obj.pos, 0)
self.assertEqual(obj.ext, 0)
obj.method()
self.assertEqual(obj.pos, 1)
class TestEmpty(unittest.TestCase):
def test_range_u8(self):
obj = binstruct.Binstruct(b"")
with self.assertRaises(binstruct.error):
obj.unpack_u8()
with self.assertRaises(binstruct.error):
obj.pos += 1
class TestUnpackByte(unittest.TestCase):
def test_bool(self):
obj = binstruct.Binstruct(b"\x00\x01")
value = obj.unpack_bool()
self.assertIsInstance(value, bool)
self.assertEqual(value, False)
self.assertEqual(obj.pos, 1)
value = obj.unpack_bool()
self.assertIsInstance(value, bool)
self.assertEqual(value, True)
self.assertEqual(obj.pos, 2)
with self.assertRaises(binstruct.error):
obj.unpack_bool()
def test_u8(self):
obj = binstruct.Binstruct(b"\x81\x82")
with self.assertRaises(TypeError):
obj.unpack_u8(None)
value = obj.unpack_u8()
self.assertEqual(value, 0x81)
self.assertEqual(obj.pos, 1)
self.assertIsInstance(value, int)
self.assertEqual(obj.unpack_u8(), 0x82)
self.assertEqual(obj.pos, 2)
def test_s8(self):
obj = binstruct.Binstruct(b"\x81\x82")
value = obj.unpack_s8()
self.assertEqual(value, -127)
self.assertEqual(obj.pos, 1)
self.assertIsInstance(value, int)
value = obj.unpack_s8()
self.assertEqual(value, -126)
self.assertEqual(obj.pos, 2)
self.assertIsInstance(value, int)
with self.assertRaises(TypeError):
obj.unpack_s8(None)
class TestUnpackBE(unittest.TestCase):
def setUp(self):
self.obj = binstruct.Binstruct(b"\x81\x82\x83\x84\x85\x86\x87\x88")
def test_ube16(self):
value = self.obj.unpack_ube16()
self.assertEqual(value, 0x8182)
self.assertIsInstance(value, int)
self.assertEqual(self.obj.pos, 2)
def test_sbe16(self):
value = self.obj.unpack_sbe16()
self.assertEqual(value, -32382)
self.assertIsInstance(value, int)
self.assertEqual(self.obj.pos, 2)
def test_ube32(self):
value = self.obj.unpack_ube32()
self.assertEqual(value, 0x81828384)
self.assertEqual(self.obj.pos, 4)
def test_sbe32(self):
value = self.obj.unpack_sbe32()
self.assertEqual(value, -2122153084)
self.assertIsInstance(value, int)
self.assertEqual(self.obj.pos, 4)
def test_ube64(self):
value = self.obj.unpack_ube64()
self.assertEqual(value, 0x8182838485868788)
self.assertIsInstance(value, long)
self.assertEqual(self.obj.pos, 8)
def test_sbe64(self):
value = self.obj.unpack_sbe64()
self.assertEqual(value, -9114578090645354616)
self.assertIsInstance(value, long)
self.assertEqual(self.obj.pos, 8)
class TestUnpackLE(unittest.TestCase):
def setUp(self):
self.obj = binstruct.Binstruct(b"\x81\x82\x83\x84\x85\x86\x87\x88")
def test_ule16(self):
value = self.obj.unpack_ule16()
self.assertEqual(value, 0x8281)
self.assertIsInstance(value, int)
self.assertEqual(self.obj.pos, 2)
def test_sle16(self):
value = self.obj.unpack_sle16()
self.assertEqual(value, -32127)
self.assertIsInstance(value, int)
self.assertEqual(self.obj.pos, 2)
def test_ule32(self):
value = self.obj.unpack_ule32()
self.assertEqual(value, 0x84838281)
self.assertEqual(self.obj.pos, 4)
def test_sle32(self):
value = self.obj.unpack_sle32()
self.assertEqual(value, -2071756159)
self.assertIsInstance(value, int)
self.assertEqual(self.obj.pos, 4)
def test_ule64(self):
value = self.obj.unpack_ule64()
self.assertEqual(value, 0x8887868584838281)
self.assertIsInstance(value, long)
self.assertEqual(self.obj.pos, 8)
def test_sle64(self):
value = self.obj.unpack_sle64()
self.assertEqual(value, -8608764254683430271)
self.assertIsInstance(value, long)
self.assertEqual(self.obj.pos, 8)
class TestUnpackULEB128(unittest.TestCase):
def test_arg(self):
obj = binstruct.Binstruct(b"\01")
with self.assertRaises(TypeError):
obj.unpack_uleb128(None)
def test_value(self):
obj = binstruct.Binstruct(b"\xE5\x8E\x26\x00\xFF")
value = obj.unpack_uleb128()
self.assertIsInstance(value, int)
self.assertEqual(value, 624485)
self.assertEqual(obj.pos, 3)
def test_long(self):
obj = binstruct.Binstruct(b"\xFF\xFF\xFF\xFF\xFF\xE5\x8E\x26\x00\xFF")
value = obj.unpack_uleb128()
self.assertIsInstance(value, long)
self.assertEqual(value, 18446744073709551615L)
self.assertEqual(obj.pos, 8)
def test_empty(self):
obj = binstruct.Binstruct(b"")
with self.assertRaises(binstruct.error):
obj.unpack_uleb128()
def test_zero(self):
obj = binstruct.Binstruct(b"\x00")
self.assertEqual(obj.unpack_uleb128(), 0)
self.assertEqual(obj.pos, 1)
def test_corrupt(self):
obj = binstruct.Binstruct(b"\x80")
with self.assertRaises(binstruct.error):
obj.unpack_uleb128()
def test_messy(self):
obj = binstruct.Binstruct(b"\x80\x01")
self.assertEqual(obj.unpack_uleb128(), 0x80)
self.assertEqual(obj.pos, 2)
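# Worked example for test_value above: 624485 split into 7-bit groups
# (least significant first) is 0x65, 0x0E, 0x26; setting the continuation bit
# on all but the last group gives the bytes 0xE5 0x8E 0x26 used in the test data.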
class TestUnpackSLEB128(unittest.TestCase):
def test_arg(self):
obj = binstruct.Binstruct(b"\01")
with self.assertRaises(TypeError):
obj.unpack_sleb128(None)
def test_value(self):
obj = binstruct.Binstruct(b"\x9b\xf1\x59\x00\xFF")
value = obj.unpack_sleb128()
self.assertIsInstance(value, int)
self.assertEqual(value, -624485)
self.assertEqual(obj.pos, 3)
def test_long(self):
obj = binstruct.Binstruct(b"\xFF\xFF\xFF\xE5\x8E\x76\x00\xFF")
value = obj.unpack_sleb128()
self.assertIsInstance(value, int)
self.assertEqual(value, -322961409)
self.assertEqual(obj.pos, 6)
def test_empty(self):
obj = binstruct.Binstruct(b"")
with self.assertRaises(binstruct.error):
obj.unpack_sleb128()
def test_zero(self):
obj = binstruct.Binstruct(b"\x00")
self.assertEqual(obj.unpack_sleb128(), 0)
self.assertEqual(obj.pos, 1)
def test_corrupt(self):
obj = binstruct.Binstruct(b"\x80")
with self.assertRaises(binstruct.error):
obj.unpack_sleb128()
def test_messy(self):
obj = binstruct.Binstruct(b"\x80\x01")
self.assertEqual(obj.unpack_sleb128(), 0x80)
self.assertEqual(obj.pos, 2)
class TestUnpackString(unittest.TestCase):
def test_string(self):
obj = binstruct.Binstruct(b"test\x00next\x00")
value = obj.unpack_string()
self.assertIsInstance(value, str)
self.assertEqual(value, "test")
self.assertEqual(obj.pos, 5)
value = obj.unpack_string()
self.assertIsInstance(value, str)
self.assertEqual(value, "next")
self.assertEqual(obj.pos, 10)
def test_empty(self):
obj = binstruct.Binstruct(b"")
with self.assertRaises(binstruct.error):
obj.unpack_string()
def test_unterminated(self):
obj = binstruct.Binstruct(b"test")
value = obj.unpack_string()
self.assertIsInstance(value, str)
self.assertEqual(value, "test")
def test_arg(self):
obj = binstruct.Binstruct(b"\01")
with self.assertRaises(TypeError):
obj.unpack_string(None)
class TestUnpackBlock(unittest.TestCase):
def test_block(self):
obj = binstruct.Binstruct(b"test\x11")
value = obj.unpack_block(4)
self.assertIsInstance(value, str)
self.assertEqual(value, b"test")
self.assertEqual(obj.pos, 4)
value = obj.unpack_block(1)
self.assertIsInstance(value, str)
self.assertEqual(value, b"\x11")
self.assertEqual(obj.pos, 5)
value = obj.unpack_block(0)
self.assertIsInstance(value, str)
self.assertEqual(value, b"")
self.assertEqual(obj.pos, 5)
def test_error(self):
data = "test\x11"
obj = binstruct.Binstruct(data)
with self.assertRaises(TypeError):
obj.unpack_block()
with self.assertRaises(TypeError):
obj.unpack_block(None)
with self.assertRaises(binstruct.error):
obj.unpack_block(-1)
with self.assertRaises(binstruct.error):
obj.unpack_block(6)
obj.pos = 5
with self.assertRaises(binstruct.error):
obj.unpack_block(1)
self.assertEqual(obj.pos, 5)
class TestUnpackBlock8(unittest.TestCase):
def test_block(self):
obj = binstruct.Binstruct(b"\x03test")
value = obj.unpack_block_u8()
self.assertIsInstance(value, str)
self.assertEqual(value, "tes")
self.assertEqual(obj.pos, 4)
obj = binstruct.Binstruct(b"\x00test")
value = obj.unpack_block_u8()
self.assertIsInstance(value, str)
self.assertEqual(value, "")
self.assertEqual(obj.pos, 1)
def test_error(self):
obj = binstruct.Binstruct(b"\x05test")
with self.assertRaises(binstruct.error):
obj.unpack_block_u8()
obj = binstruct.Binstruct(b"")
with self.assertRaises(binstruct.error):
obj.unpack_block_u8()
obj = binstruct.Binstruct(b"test")
with self.assertRaises(TypeError):
obj.unpack_block_u8(None)
class TestUnpackBlock16(unittest.TestCase):
def test_block(self):
obj = binstruct.Binstruct(b"\x00\x03test")
value = obj.unpack_block_be16()
self.assertIsInstance(value, str)
self.assertEqual(value, "tes")
self.assertEqual(obj.pos, 5)
obj = binstruct.Binstruct(b"\x03\x00test")
value = obj.unpack_block_le16()
self.assertIsInstance(value, str)
self.assertEqual(value, "tes")
self.assertEqual(obj.pos, 5)
obj = binstruct.Binstruct(b"\x00\x00test")
value = obj.unpack_block_be16()
self.assertIsInstance(value, str)
self.assertEqual(value, "")
self.assertEqual(obj.pos, 2)
obj = binstruct.Binstruct(b"\x00\x00test")
value = obj.unpack_block_le16()
self.assertIsInstance(value, str)
self.assertEqual(value, "")
self.assertEqual(obj.pos, 2)
def test_error(self):
obj = binstruct.Binstruct(b"\x05\x00test")
with self.assertRaises(binstruct.error):
obj.unpack_block_be16()
obj = binstruct.Binstruct(b"\x00\x05test")
with self.assertRaises(binstruct.error):
obj.unpack_block_le16()
obj = binstruct.Binstruct(b"")
with self.assertRaises(binstruct.error):
obj.unpack_block_be16()
with self.assertRaises(binstruct.error):
obj.unpack_block_le16()
obj = binstruct.Binstruct(b"test")
with self.assertRaises(TypeError):
obj.unpack_block_be16(None)
with self.assertRaises(TypeError):
obj.unpack_block_le16(None)
class TestUnpackBlock32(unittest.TestCase):
def test_block(self):
obj = binstruct.Binstruct(b"\x00\x00\x00\x03test")
value = obj.unpack_block_be32()
self.assertIsInstance(value, str)
self.assertEqual(value, "tes")
self.assertEqual(obj.pos, 7)
obj = binstruct.Binstruct(b"\x03\x00\x00\x00test")
value = obj.unpack_block_le32()
self.assertIsInstance(value, str)
self.assertEqual(value, "tes")
self.assertEqual(obj.pos, 7)
obj = binstruct.Binstruct(b"\x00\x00\x00\x00test")
value = obj.unpack_block_be32()
self.assertIsInstance(value, str)
self.assertEqual(value, "")
self.assertEqual(obj.pos, 4)
obj = binstruct.Binstruct(b"\x00\x00\x00\x00test")
value = obj.unpack_block_le32()
self.assertIsInstance(value, str)
self.assertEqual(value, "")
self.assertEqual(obj.pos, 4)
def test_error(self):
obj = binstruct.Binstruct(b"\x05\x00\x00\x00test")
with self.assertRaises(binstruct.error):
obj.unpack_block_be32()
obj = binstruct.Binstruct(b"\x00\x00\x00\x05test")
with self.assertRaises(binstruct.error):
obj.unpack_block_le32()
obj = binstruct.Binstruct(b"")
with self.assertRaises(binstruct.error):
obj.unpack_block_be32()
with self.assertRaises(binstruct.error):
obj.unpack_block_le32()
obj = binstruct.Binstruct(b"test")
with self.assertRaises(TypeError):
obj.unpack_block_be32(None)
with self.assertRaises(TypeError):
obj.unpack_block_le32(None)
class TestUnpackBlock64(unittest.TestCase):
def test_block(self):
obj = binstruct.Binstruct(b"\x00\x00\x00\x00\x00\x00\x00\x03test")
value = obj.unpack_block_be64()
self.assertIsInstance(value, str)
self.assertEqual(value, "tes")
self.assertEqual(obj.pos, 11)
obj = binstruct.Binstruct(b"\x03\x00\x00\x00\x00\x00\x00\x00test")
value = obj.unpack_block_le64()
self.assertIsInstance(value, str)
self.assertEqual(value, "tes")
self.assertEqual(obj.pos, 11)
obj = binstruct.Binstruct(b"\x00\x00\x00\x00\x00\x00\x00\x00test")
value = obj.unpack_block_be64()
self.assertIsInstance(value, str)
self.assertEqual(value, "")
self.assertEqual(obj.pos, 8)
obj = binstruct.Binstruct(b"\x00\x00\x00\x00\x00\x00\x00\x00test")
value = obj.unpack_block_le64()
self.assertIsInstance(value, str)
self.assertEqual(value, "")
self.assertEqual(obj.pos, 8)
def test_error(self):
obj = binstruct.Binstruct(b"\x00\x00\x00\x00\x00\x00\x00\x05test")
with self.assertRaises(binstruct.error):
obj.unpack_block_be64()
obj = binstruct.Binstruct(b"\x05\x00\x00\x00\x00\x00\x00\x00test")
with self.assertRaises(binstruct.error):
obj.unpack_block_le64()
obj = binstruct.Binstruct(b"")
with self.assertRaises(binstruct.error):
obj.unpack_block_be64()
with self.assertRaises(binstruct.error):
obj.unpack_block_le64()
obj = binstruct.Binstruct(b"test")
with self.assertRaises(TypeError):
obj.unpack_block_be64(None)
with self.assertRaises(TypeError):
obj.unpack_block_le64(None)
class TestUnpackBlockULEB128(unittest.TestCase):
def test_block(self):
obj = binstruct.Binstruct(b"\x03test")
value = obj.unpack_block_uleb128()
self.assertIsInstance(value, str)
self.assertEqual(value, "tes")
self.assertEqual(obj.pos, 4)
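        # b"\x84\x80\x80\x00" is a (zero-padded) ULEB128 encoding of 4: 7-bit
        # groups little-endian, continuation bit set on every byte but the last.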
obj = binstruct.Binstruct(b"\x84\x80\x80\x00test")
value = obj.unpack_block_uleb128()
self.assertIsInstance(value, str)
self.assertEqual(value, "test")
self.assertEqual(obj.pos, 8)
def test_error(self):
obj = binstruct.Binstruct(b"\x05test")
with self.assertRaises(binstruct.error):
obj.unpack_block_uleb128()
obj = binstruct.Binstruct(b"")
with self.assertRaises(binstruct.error):
obj.unpack_block_uleb128()
obj = binstruct.Binstruct(b"test")
with self.assertRaises(TypeError):
obj.unpack_block_uleb128(None)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A remote procedure call (rpc) abstraction.
For some wrappers that add message versioning to rpc, see:
rpc.dispatcher
rpc.proxy
"""
import inspect
from oslo.config import cfg
#from neutron.openstack.common.gettextutils import _
from charging.openstack.common import importutils
from charging.openstack.common import local
from charging.openstack.common import log as logging
LOG = logging.getLogger(__name__)
rpc_opts = [
cfg.StrOpt('rpc_backend',
default='%s.impl_kombu' % __package__,
help="The messaging module to use, defaults to kombu."),
cfg.IntOpt('rpc_thread_pool_size',
default=64,
help='Size of RPC thread pool'),
cfg.IntOpt('rpc_conn_pool_size',
default=30,
help='Size of RPC connection pool'),
cfg.IntOpt('rpc_response_timeout',
default=60,
help='Seconds to wait for a response from call or multicall'),
cfg.IntOpt('rpc_cast_timeout',
default=30,
help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['nova.exception',
'cinder.exception',
'exceptions',
],
help='Modules of exceptions that are permitted to be recreated'
' upon receiving exception data from an rpc call.'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
cfg.StrOpt('control_exchange',
default='openstack',
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
CONF = cfg.CONF
CONF.register_opts(rpc_opts)
def set_defaults(control_exchange):
cfg.set_defaults(rpc_opts,
control_exchange=control_exchange)
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
For some example usage of creating a connection and some consumers on that
connection, see nova.service.
:param new: Whether or not to create a new connection. A new connection
will be created by default. If new is False, the
implementation is free to return an existing connection from a
pool.
:returns: An instance of openstack.common.rpc.common.Connection
"""
return _get_impl().create_connection(CONF, new=new)
def _check_for_lock():
if not CONF.debug:
return None
if ((hasattr(local.strong_store, 'locks_held')
and local.strong_store.locks_held)):
stack = ' :: '.join([frame[3] for frame in inspect.stack()])
        LOG.warn(_('An RPC is being made while holding a lock. The locks '
'currently held are %(locks)s. This is probably a bug. '
'Please report it. Include the following: [%(stack)s].'),
{'locks': local.strong_store.locks_held,
'stack': stack})
return True
return False
def call(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
    :param check_for_lock: if True, a warning is emitted if an RPC call is made
with a lock held.
:returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
if check_for_lock:
_check_for_lock()
return _get_impl().call(CONF, context, topic, msg, timeout)
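# Illustrative usage (context, topic and method names are hypothetical):
#   result = call(ctxt, 'compute', {'method': 'get_status',
#                                   'args': {'host': 'node1'}})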
def cast(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast(CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
This method will get invoked on all consumers that were set up with this
topic name and fanout=True.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=True.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast(CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method and get back an iterator.
    In this case, the remote method returns multiple values in separate
    messages, so the return values can be processed as they come in via
    an iterator.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
    :param check_for_lock: if True, a warning is emitted if an RPC call is made
with a lock held.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
returned and X is the Nth value that was returned by the remote
method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
if check_for_lock:
_check_for_lock()
return _get_impl().multicall(CONF, context, topic, msg, timeout)
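# Illustrative usage (hypothetical method name); each yielded item is an
# (index, value) tuple, so streamed results can be consumed incrementally:
#   for i, instance in multicall(ctxt, 'compute',
#                                {'method': 'list_instances', 'args': {}}):
#       handle(instance)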
def notify(context, topic, msg, envelope=False):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
:param envelope: Set to True to enable message envelope for notifications.
:returns: None
"""
return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
def cleanup():
"""Clean up resources in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup()
def cast_to_server(context, server_params, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_to_server(CONF, context, server_params, topic,
msg)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Broadcast to a remote method invocation with no return.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast_to_server(CONF, context, server_params,
topic, msg)
def queue_get_for(context, topic, host):
"""Get a queue name for a given topic + host.
This function only works if this naming convention is followed on the
consumer side, as well. For example, in nova, every instance of the
nova-foo service calls create_consumer() for two topics:
foo
foo.<host>
Messages sent to the 'foo' topic are distributed to exactly one instance of
the nova-foo service. The services are chosen in a round-robin fashion.
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
return '%s.%s' % (topic, host) if host else topic
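# For example, queue_get_for(ctxt, 'compute', 'host1') returns 'compute.host1',
# while queue_get_for(ctxt, 'compute', None) returns just 'compute'.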
_RPCIMPL = None
def _get_impl():
"""Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL
if _RPCIMPL is None:
try:
_RPCIMPL = importutils.import_module(CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older nova config.
impl = CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A collection of ORM sqlalchemy models for Superset"""
import enum
from cron_descriptor import get_description
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from sqlalchemy import (
Boolean,
Column,
DateTime,
Float,
ForeignKey,
Integer,
String,
Table,
Text,
)
from sqlalchemy.orm import backref, relationship
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy_utils import UUIDType
from superset.extensions import security_manager
from superset.models.core import Database
from superset.models.dashboard import Dashboard
from superset.models.helpers import AuditMixinNullable
from superset.models.slice import Slice
metadata = Model.metadata # pylint: disable=no-member
class ReportScheduleType(str, enum.Enum):
ALERT = "Alert"
REPORT = "Report"
class ReportScheduleValidatorType(str, enum.Enum):
""" Validator types for alerts """
NOT_NULL = "not null"
OPERATOR = "operator"
class ReportRecipientType(str, enum.Enum):
EMAIL = "Email"
SLACK = "Slack"
class ReportState(str, enum.Enum):
SUCCESS = "Success"
WORKING = "Working"
ERROR = "Error"
NOOP = "Not triggered"
GRACE = "On Grace"
class ReportDataFormat(str, enum.Enum):
VISUALIZATION = "PNG"
DATA = "CSV"
TEXT = "TEXT"
class ReportCreationMethodType(str, enum.Enum):
CHARTS = "charts"
DASHBOARDS = "dashboards"
ALERTS_REPORTS = "alerts_reports"
report_schedule_user = Table(
"report_schedule_user",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id"), nullable=False),
Column(
"report_schedule_id", Integer, ForeignKey("report_schedule.id"), nullable=False
),
UniqueConstraint("user_id", "report_schedule_id"),
)
class ReportSchedule(Model, AuditMixinNullable):
"""
Report Schedules, supports alerts and reports
"""
__tablename__ = "report_schedule"
__table_args__ = (UniqueConstraint("name", "type"),)
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
name = Column(String(150), nullable=False)
description = Column(Text)
context_markdown = Column(Text)
active = Column(Boolean, default=True, index=True)
crontab = Column(String(1000), nullable=False)
creation_method = Column(
String(255), server_default=ReportCreationMethodType.ALERTS_REPORTS
)
timezone = Column(String(100), default="UTC", nullable=False)
report_format = Column(String(50), default=ReportDataFormat.VISUALIZATION)
sql = Column(Text())
# (Alerts/Reports) M-O to chart
chart_id = Column(Integer, ForeignKey("slices.id"), nullable=True)
chart = relationship(Slice, backref="report_schedules", foreign_keys=[chart_id])
# (Alerts/Reports) M-O to dashboard
dashboard_id = Column(Integer, ForeignKey("dashboards.id"), nullable=True)
dashboard = relationship(
Dashboard, backref="report_schedules", foreign_keys=[dashboard_id]
)
# (Alerts) M-O to database
database_id = Column(Integer, ForeignKey("dbs.id"), nullable=True)
database = relationship(Database, foreign_keys=[database_id])
owners = relationship(security_manager.user_model, secondary=report_schedule_user)
# (Alerts) Stamped last observations
last_eval_dttm = Column(DateTime)
last_state = Column(String(50), default=ReportState.NOOP)
last_value = Column(Float)
last_value_row_json = Column(Text)
# (Alerts) Observed value validation related columns
validator_type = Column(String(100))
validator_config_json = Column(Text, default="{}")
# Log retention
log_retention = Column(Integer, default=90)
# (Alerts) After a success how long to wait for a new trigger (seconds)
grace_period = Column(Integer, default=60 * 60 * 4)
# (Alerts/Reports) Unlock a possible stalled working state
working_timeout = Column(Integer, default=60 * 60 * 1)
def __repr__(self) -> str:
return str(self.name)
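    # Renders the crontab as human-readable text via cron_descriptor; for
    # example, a crontab of "0 9 * * *" renders as something like "At 09:00 AM".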
@renders("crontab")
def crontab_humanized(self) -> str:
return get_description(self.crontab)
class ReportRecipients(
Model, AuditMixinNullable
): # pylint: disable=too-few-public-methods
"""
    Report Recipients, meant to support multiple notification types, e.g. Slack, email
"""
__tablename__ = "report_recipient"
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
recipient_config_json = Column(Text, default="{}")
report_schedule_id = Column(
Integer, ForeignKey("report_schedule.id"), nullable=False
)
report_schedule = relationship(
ReportSchedule,
backref=backref("recipients", cascade="all,delete,delete-orphan"),
foreign_keys=[report_schedule_id],
)
class ReportExecutionLog(Model): # pylint: disable=too-few-public-methods
"""
    Report Execution Log, holds the result of the report execution with timestamps,
    last observation and possible error messages
"""
__tablename__ = "report_execution_log"
id = Column(Integer, primary_key=True)
uuid = Column(UUIDType(binary=True))
# Timestamps
scheduled_dttm = Column(DateTime, nullable=False)
start_dttm = Column(DateTime)
end_dttm = Column(DateTime)
# (Alerts) Observed values
value = Column(Float)
value_row_json = Column(Text)
state = Column(String(50), nullable=False)
error_message = Column(Text)
report_schedule_id = Column(
Integer, ForeignKey("report_schedule.id"), nullable=False
)
report_schedule = relationship(
ReportSchedule,
backref=backref("logs", cascade="all,delete,delete-orphan"),
foreign_keys=[report_schedule_id],
)
|
|
# --------------------------------------------------------
# Scene Graph Generation by Iterative Message Passing
# Licensed under The MIT License [see LICENSE for details]
# Written by Danfei Xu
# --------------------------------------------------------
"""
Train a scene graph generation network
"""
import tensorflow as tf
import numpy as np
import os
from fast_rcnn.config import cfg
from networks.factory import get_network
from networks import losses
from roi_data_layer.data_runner import DataRunnerMP
from roi_data_layer.layer import RoIDataLayer
from utils.timer import Timer
class Trainer(object):
def __init__(self, sess, net_name, imdb, roidb, output_dir, tf_log, pretrained_model=None):
"""Initialize the SolverWrapper."""
self.net_name = net_name
self.imdb = imdb
self.roidb = roidb
self.output_dir = output_dir
self.tf_log = tf_log
self.pretrained_model = pretrained_model
self.bbox_means = np.zeros((self.imdb.num_classes, 4))
self.bbox_stds = np.ones((self.imdb.num_classes, 4))
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS:
            print('Loading precomputed bbox target distribution from %s' %
                  cfg.TRAIN.BBOX_TARGET_NORMALIZATION_FILE)
bbox_dist = np.load(cfg.TRAIN.BBOX_TARGET_NORMALIZATION_FILE).item()
self.bbox_means = bbox_dist['means']
self.bbox_stds = bbox_dist['stds']
            print('done')
def snapshot(self, sess, iter):
"""Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
net = self.net
if cfg.TRAIN.BBOX_REG and 'bbox_pred' in net.layers and cfg.TRAIN.BBOX_NORMALIZE_TARGETS:
# save original values
with tf.variable_scope('bbox_pred', reuse=True):
weights = tf.get_variable("weights")
biases = tf.get_variable("biases")
orig_0 = weights.eval()
orig_1 = biases.eval()
# scale and shift with bbox reg unnormalization; then save snapshot
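            # i.e. saved weights = weights * stds (tiled over the input dim) and
            # saved biases = biases * stds + means, so the checkpoint predicts
            # unnormalized box targets directly.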
weights_shape = weights.get_shape().as_list()
sess.run(weights.assign(orig_0 * np.tile(self.bbox_stds.ravel(), (weights_shape[0],1))))
sess.run(biases.assign(orig_1 * self.bbox_stds.ravel() + self.bbox_means.ravel()))
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
filename = os.path.join(self.output_dir, 'weights_%i.ckpt' % iter)
self.saver.save(sess, filename)
        print('Wrote snapshot to: {:s}'.format(filename))
if cfg.TRAIN.BBOX_REG and 'bbox_pred' in net.layers and cfg.TRAIN.BBOX_NORMALIZE_TARGETS:
# restore net to original state
sess.run(weights.assign(orig_0))
sess.run(biases.assign(orig_1))
def get_data_runner(self, sess, data_layer):
input_pls = {
'ims': tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3]),
'rois': tf.placeholder(dtype=tf.float32, shape=[None, 5]),
'rel_rois': tf.placeholder(dtype=tf.float32, shape=[None, 5]),
'labels': tf.placeholder(dtype=tf.int32, shape=[None]),
'relations': tf.placeholder(dtype=tf.int32, shape=[None, 2]),
'predicates': tf.placeholder(dtype=tf.int32, shape=[None]),
'bbox_targets': tf.placeholder(dtype=tf.float32, shape=[None, 4 * self.imdb.num_classes]),
'bbox_inside_weights': tf.placeholder(dtype=tf.float32, shape=[None, 4 * self.imdb.num_classes]),
'num_roi': tf.placeholder(dtype=tf.int32, shape=[]), # number of rois per batch
'num_rel': tf.placeholder(dtype=tf.int32, shape=[]), # number of relationships per batch
'rel_mask_inds': tf.placeholder(dtype=tf.int32, shape=[None]),
'rel_segment_inds': tf.placeholder(dtype=tf.int32, shape=[None]),
'rel_pair_mask_inds': tf.placeholder(dtype=tf.int32, shape=[None,2]),
'rel_pair_segment_inds': tf.placeholder(dtype=tf.int32, shape=[None])
}
def data_generator():
while True:
yield data_layer.next_batch()
def task_generator():
while True:
yield data_layer._get_next_minibatch_inds()
task_func = data_layer._get_next_minibatch
data_runner = DataRunnerMP(task_func, task_generator, input_pls, capacity=24)
return data_runner
def train_model(self, sess, max_iters):
"""Network training loop."""
data_layer = RoIDataLayer(self.imdb, self.bbox_means, self.bbox_stds)
# a multi-process data runner
data_runner = self.get_data_runner(sess, data_layer)
        inputs = data_runner.get_inputs()
inputs['num_classes'] = self.imdb.num_classes
inputs['num_predicates'] = self.imdb.num_predicates
inputs['n_iter'] = cfg.TRAIN.INFERENCE_ITER
self.net = get_network(self.net_name)(inputs)
self.net.setup()
# get network-defined losses
ops = self.net.losses()
# multitask loss
loss_list = [ops[k] for k in ops if k.startswith('loss')]
ops['loss_total'] = losses.total_loss_and_summaries(loss_list, 'total_loss')
# optimizer
lr = tf.Variable(cfg.TRAIN.LEARNING_RATE, trainable=False)
momentum = cfg.TRAIN.MOMENTUM
ops['train'] = tf.train.MomentumOptimizer(lr, momentum).minimize(ops['loss_total'])
ops_summary = dict(ops)
#merge summaries
ops_summary['summary'] = tf.merge_all_summaries()
train_writer = tf.train.SummaryWriter(self.tf_log, sess.graph)
self.saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=None)
sess.run(tf.initialize_all_variables())
#data_runner.start_threads(sess, n_threads=10)
data_runner.start_processes(sess, n_processes=3)
        # load pretrained weights, if any were provided
if self.pretrained_model is not None:
            print('Loading pretrained model '
                  'weights from {:s}'.format(self.pretrained_model))
if self.pretrained_model.endswith('.npy'):
self.net.load(self.pretrained_model, sess, load_fc=True)
elif self.pretrained_model.endswith('.ckpt'):
self.saver.restore(sess, self.pretrained_model)
else:
                print('Unsupported pretrained weights format')
                raise ValueError(self.pretrained_model)
last_snapshot_iter = -1
timer = Timer()
iter_timer = Timer()
# Training loop
for iter in range(max_iters):
# learning rate
iter_timer.tic()
if (iter+1) % cfg.TRAIN.STEPSIZE == 0:
sess.run(tf.assign(lr, cfg.TRAIN.LEARNING_RATE * cfg.TRAIN.GAMMA))
# Make one SGD update
feed_dict = data_runner.get_feed_batch()
feed_dict[self.net.keep_prob] = 0.5
timer.tic()
if (iter + 1) % cfg.TRAIN.SUMMARY_FREQ == 0:
ops_value = sess.run(ops_summary, feed_dict=feed_dict)
train_writer.add_summary(ops_value['summary'], iter)
else:
ops_value = sess.run(ops, feed_dict=feed_dict)
timer.toc()
stats = 'iter: %d / %d, lr: %f' % (iter+1, max_iters, lr.eval())
for k in ops_value:
if k.startswith('loss'):
stats += ', %s: %4f' % (k, ops_value[k])
print(stats)
iter_timer.toc()
if (iter+1) % (10 * cfg.TRAIN.DISPLAY_FREQ) == 0:
                print('speed: {:.3f}s / iter'.format(timer.average_time))
                print('iter speed: {:.3f}s / iter'.format(iter_timer.average_time))
if (iter+1) % cfg.TRAIN.SNAPSHOT_FREQ == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
def train_net(network_name, imdb, roidb, output_dir, tf_log, pretrained_model=None, max_iters=200000):
config = tf.ConfigProto()
config.allow_soft_placement=True
# config.gpu_options.allow_growth=True
with tf.Session(config=config) as sess:
tf.set_random_seed(cfg.RNG_SEED)
trainer = Trainer(sess, network_name, imdb, roidb, output_dir, tf_log, pretrained_model=pretrained_model)
trainer.train_model(sess, max_iters)
|
|
# encoding: utf-8
import datetime
import json
import os
import difflib
import logging
import itertools
import tagging
import voting
from actstream import action
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import DatabaseError
from django.http import (HttpResponseRedirect, HttpResponse, Http404,
HttpResponseBadRequest, HttpResponseForbidden)
from django.views.decorators.http import require_http_methods
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy, ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from django.db.models import Q
from tagging.models import Tag, TaggedItem
from agendas.models import Agenda, UserSuggestedVote, Link
from laws.vote_choices import BILL_STAGE_CHOICES
from ok_tag.views import BaseTagMemberListView
from auxiliary.mixins import CsvView
from forms import VoteSelectForm, BillSelectForm, BudgetEstimateForm
from forms import AttachBillFromVoteForm
from hashnav import DetailView, ListView as HashnavListView
from knesset.utils import notify_responsible_adult
from mks.models import Member
from models import Bill, BillBudgetEstimation, Vote, KnessetProposal, VoteAction
logger = logging.getLogger("open-knesset.laws.views")
def bill_tags_cloud(request, min_posts_count=1):
member = None
if 'member' in request.GET:
try:
member = Member.objects.get(pk=request.GET['member'])
except (Member.DoesNotExist, ValueError):
raise Http404
tags_cloud = Tag.objects.usage_for_queryset(member.bills.all(),
counts=True)
tags_cloud = tagging.utils.calculate_cloud(tags_cloud)
title = _('Bills by %(member)s by tag') % {'member': member.name}
else:
title = _('Bills by tag')
tags_cloud = Tag.objects.cloud_for_model(Bill)
return render_to_response(
"laws/bill_tags_cloud.html",
{"tags_cloud": tags_cloud, "title": title, "member": member},
context_instance=RequestContext(request))
class BillTagsView(BaseTagMemberListView):
template_name = 'laws/bill_list_by_tag.html'
url_to_reverse = 'bill-tag'
def get_queryset(self):
tag_instance = self.tag_instance
member = self.member
if member:
qs = member.bills.all()
else:
qs = Bill
queryset = TaggedItem.objects.get_by_model(qs, tag_instance)
return queryset
def get_bill_proposers_cloud(self):
bill_proposers = [
b.proposers.all() for b in
TaggedItem.objects.get_by_model(Bill, self.tag_instance)]
d = {}
for bill in bill_proposers:
for p in bill:
d[p] = d.get(p, 0) + 1
# now d is a dict: MK -> number of proposals in this tag
mks = d.keys()
for mk in mks:
mk.count = d[mk]
return tagging.utils.calculate_cloud(mks)
def get_context_data(self, *args, **kwargs):
context = super(BillTagsView, self).get_context_data(*args, **kwargs)
if self.member:
context['title'] = _('Bills tagged %(tag)s by %(member)s') % {
'tag': self.kwargs['tag'],
'member': self.member.name
}
else: # only tag is given
context['title'] = _('Bills tagged %(tag)s') % {
'tag': self.kwargs['tag']}
context['members'] = self.get_bill_proposers_cloud()
return context
@require_http_methods(["GET"])
def bill_auto_complete(request):
if not 'query' in request.GET:
raise Http404
options = Bill.objects.filter(
full_title__icontains=request.GET['query'])[:30]
data = []
suggestions = []
for i in options:
data.append(i.id)
suggestions.append(i.full_title)
result = {'query': request.GET['query'],
'suggestions': suggestions,
'data': data}
return HttpResponse(json.dumps(result), mimetype='application/json')
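# The JSON payload echoes the query, e.g.
# {"query": "q", "suggestions": ["<bill full title>", ...], "data": [<bill id>, ...]}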
def vote_tags_cloud(request, min_posts_count=1):
member = None
if 'member' in request.GET:
try:
member = Member.objects.get(pk=request.GET['member'])
except (Member.DoesNotExist, ValueError):
raise Http404
tags_cloud = Tag.objects.usage_for_queryset(member.votes.all(),
counts=True)
tags_cloud = tagging.utils.calculate_cloud(tags_cloud)
title = _('Votes by %(member)s by tag') % {'member': member.name}
else:
title = _('Votes by tag')
tags_cloud = Tag.objects.cloud_for_model(Vote)
return render_to_response(
"laws/vote_tags_cloud.html",
{"tags_cloud": tags_cloud, "title": title, "member": member},
context_instance=RequestContext(request))
class VoteTagsView(BaseTagMemberListView):
template_name = 'laws/vote_list_by_tag.html'
url_to_reverse = 'vote-tag'
def get_queryset(self):
tag_instance = self.tag_instance
member = self.member
if member:
qs = member.votes.all()
else:
qs = Vote
return TaggedItem.objects.get_by_model(qs, tag_instance)
def get_mks_cloud(self):
vote_attendence = [
v.votes.all() for v in
TaggedItem.objects.get_by_model(Vote, self.tag_instance)]
d = {}
for vote in vote_attendence:
for v in vote:
d[v] = d.get(v, 0) + 1
# now d is a dict: MK -> number of votes in this tag
mks = d.keys()
if mks:
for mk in mks:
mk.count = d[mk]
average = float(sum([mk.count for mk in mks])) / len(mks)
mks = [mk for mk in mks if mk.count >= average]
return tagging.utils.calculate_cloud(mks)
else:
return None
def get_context_data(self, *args, **kwargs):
context = super(VoteTagsView, self).get_context_data(*args, **kwargs)
if self.member:
context['title'] = ugettext_lazy(
'Votes tagged %(tag)s by %(member)s') % {
'tag': self.tag_instance.name, 'member': self.member.name}
else: # only tag is given
context['title'] = ugettext_lazy('Votes tagged %(tag)s') % {
'tag': self.tag_instance.name}
mks = self.get_mks_cloud()
if mks:
context['members'] = mks
return context
# TODO: already converted to generic ListView above,
# remove once verified working
#
# def vote_tag(request, tag):
# tag_instance = get_tag(tag)
# if tag_instance is None:
# raise Http404(_('No Tag found matching "%s".') % tag)
#
# extra_context = {'tag':tag_instance}
# extra_context['tag_url'] = reverse('vote-tag',args=[tag_instance])
# if 'member' in request.GET:
# extra_context['member'] = get_object_or_404(Member, pk=request.GET['member'])
# extra_context['member_url'] = reverse('member-detail',args=[extra_context['member'].id])
# extra_context['title'] = ugettext_lazy('Votes tagged %(tag)s by %(member)s') % {'tag': tag, 'member':extra_context['member'].name}
# qs = extra_context['member'].votes.all()
# else: # only tag is given
# extra_context['title'] = ugettext_lazy('Votes tagged %(tag)s') % {'tag': tag}
# qs = Vote
#
# queryset = TaggedItem.objects.get_by_model(qs, tag_instance)
# vote_attendence = [v.votes.all() for v in TaggedItem.objects.get_by_model(Vote, tag_instance)]
# d = {}
# for vote in vote_attendence:
# for v in vote:
# d[v] = d.get(v,0)+1
# # now d is a dict: MK -> number of votes in this tag
# mks = d.keys()
# if mks:
# for mk in mks:
# mk.count = d[mk]
# average = float(sum([mk.count for mk in mks]))/len(mks)
# mks = [mk for mk in mks if mk.count>=average]
# mks = tagging.utils.calculate_cloud(mks)
# extra_context['members'] = mks
# if request.user.is_authenticated():
# extra_context['watched_members'] = \
# request.user.get_profile().members
# else:
# extra_context['watched_members'] = False
#
# return object_list(request, queryset,
# #return tagged_object_list(request, queryset_or_model=qs, tag=tag,
# template_name='laws/vote_list_by_tag.html', extra_context=extra_context)
#
def votes_to_bar_widths(v_count, v_for, v_against):
""" a helper function to compute presentation widths for user votes bars.
v_count - the total votes count
v_for - votes for
v_against - votes against
returns: a tuple (width of for bar, width of against bar) in percent
"""
m = 12 # minimal width for small bar
T = 96 # total width for the 2 bars
if v_count: # use votes/count, with margin m
width_for = min(max(int(float(v_for) / v_count * T), m), 100 - m)
else: # 0 votes, use 50:50 width
width_for = round(T / 2)
width_against = T - width_for
return (width_for, width_against)
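# Worked example: votes_to_bar_widths(10, 7, 3) clamps int(7.0 / 10 * 96) = 67
# into [12, 88] and returns (67, 29); with zero votes it falls back to a 48/48 split.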
class BillCsvView(CsvView):
model = Bill
file_path_and_name = ['csv', 'bills.csv']
filename = os.path.join(*file_path_and_name)
list_display = (('full_title', _('Full Title')),
('popular_name', _('Popular Name')),
('get_stage_display', _('Stage')),
('stage_date', _('Stage Date')),
('pre_votes', _('Pre-Votes')),
('first_committee_meetings', _('First Committee Meetings')),
('first_vote', _('First Vote')),
('second_committee_meetings', _('Second Committee Meetings')),
('approval_vote', _('Approval Vote')),
('proposers', _('Proposers')),
('joiners', _('Joiners')))
def get_queryset(self, **kwargs):
try:
return self.model.objects.select_related('law',
'first_vote',
'approval_vote') \
.prefetch_related('joiners',
'proposers',
'pre_votes',
'first_committee_meetings',
'second_committee_meetings')
except DatabaseError: # sqlite can't prefetch this query, because it has
# too many objects
return self.model.objects.all()
def community_meeting_gen(self, obj, attr):
'''
        A helper function to render a list of related objects (e.g. committee meetings) as absolute URLs
        :param obj: The object instance
        :param attr: The object attribute
        :return: A string with the URLs space-separated
'''
host = self.request.build_absolute_uri("/")
return " ".join(host + row.get_absolute_url() for row in getattr(obj, attr).all())
def members_gen(self, obj, attr):
'''
        A helper function to render the names of related members
        :param obj: The object instance
        :param attr: The object attribute
        :return: A string with the member names comma-separated
'''
return ", ".join(row.name for row in getattr(obj, attr).all())
def proposers(self, obj, attr):
return self.members_gen(obj, attr)
def joiners(self, obj, attr):
return self.members_gen(obj, attr)
def first_committee_meetings(self, obj, attr):
return self.community_meeting_gen(obj, attr)
def second_committee_meetings(self, obj, attr):
return self.community_meeting_gen(obj, attr)
def pre_votes(self, obj, attr):
return self.community_meeting_gen(obj, attr)
class BillDetailView(DetailView):
allowed_methods = ['get', 'post']
model = Bill
@method_decorator(ensure_csrf_cookie)
def dispatch(self, *args, **kwargs):
return super(BillDetailView, self).dispatch(*args, **kwargs)
def get_object(self):
try:
return super(BillDetailView, self).get_object()
except Http404:
self.slug_field = "popular_name_slug"
return super(BillDetailView, self).get_object()
def get_context_data(self, *args, **kwargs):
context = super(BillDetailView, self).get_context_data(*args, **kwargs)
bill = context['object']
if bill.popular_name:
context["keywords"] = bill.popular_name
if self.request.user.is_authenticated():
userprofile = self.request.user.profiles.get()
context['watched'] = bill in userprofile.bills
else:
context['watched'] = False
userprofile = None
# compute data for user votes on this bill
proposers = bill.proposers.select_related('current_party')
links = list(Link.objects.for_model(Member))
links_by_member = {}
for k, g in itertools.groupby(links, lambda x: x.object_pk):
links_by_member[str(k)] = list(g)
for proposer in proposers:
proposer.cached_links = links_by_member.get(str(proposer.pk), [])
context['proposers'] = proposers
votes = voting.models.Vote.objects.get_object_votes(bill)
if 1 not in votes: votes[1] = 0
if -1 not in votes: votes[-1] = 0
count = votes[1] + votes[-1]
score = {'for': votes[1],
'against': votes[-1],
'total': votes[1] - votes[-1],
'count': count}
(score['for_percent'], score['against_percent']) = votes_to_bar_widths(
count, score['for'], score['against'])
# Count only votes by users that are members of parties
party_member_votes = voting.models.Vote.objects.get_for_object(
bill).filter(user__profiles__party__isnull=False,
is_archived=False)
votes_for = party_member_votes.filter(direction=1).count()
votes_against = party_member_votes.filter(direction=-1).count()
count = votes_for + votes_against
party_voting_score = {'for': votes_for, 'against': votes_against,
'total': votes_for - votes_against,
'count': count}
(party_voting_score['for_percent'], party_voting_score['against_percent']) = votes_to_bar_widths(
count, party_voting_score['for'], party_voting_score['against'])
# Count only votes by users that are members of the party of the viewing
# user
if userprofile and userprofile.party:
user_party_member_votes = voting.models.Vote.objects.get_for_object(
bill).filter(user__profiles__party=userprofile.party,
is_archived=False)
votes_for = user_party_member_votes.filter(direction=1).count()
votes_against = user_party_member_votes.filter(direction=-1).count()
count = votes_for + votes_against
user_party_voting_score = {'for': votes_for, 'against': votes_against,
'total': votes_for - votes_against,
'count': count}
(user_party_voting_score['for_percent'],
user_party_voting_score['against_percent']) = votes_to_bar_widths(
count, user_party_voting_score['for'], user_party_voting_score['against'])
else:
user_party_voting_score = None
context['voting_score'] = score
context['party_voting_score'] = party_voting_score
context['user_party_voting_score'] = user_party_voting_score
context['tags'] = list(bill.tags)
context['budget_ests'] = list(bill.budget_ests.all())
if self.request.user:
context['user_has_be'] = bill.budget_ests.filter(estimator__username=str(self.request.user)).count()
if 'budget_ests_form' in kwargs:
context['budget_ests_form'] = kwargs['budget_ests_form']
else:
context['budget_ests_form'] = BudgetEstimateForm(bill, self.request.user)
return context
@method_decorator(login_required)
def post(self, request, **kwargs):
object_id = kwargs['pk']
if not object_id:
return HttpResponseBadRequest()
bill = get_object_or_404(Bill, pk=object_id)
user_input_type = request.POST.get('user_input_type')
vote_types = ['approval vote', 'first vote', 'pre vote']
if user_input_type in vote_types:
i = vote_types.index(user_input_type)
vote = Vote.objects.get(pk=request.POST.get('vote_id'))
if i == 0:
bill.approval_vote = vote
elif i == 1:
bill.first_vote = vote
elif i == 2:
bill.pre_votes.add(vote)
else:
# FIXME: maybe different response.
return HttpResponseRedirect(".")
bill.update_stage()
action.send(request.user, verb='added-vote-to-bill',
description=vote,
target=bill,
timestamp=datetime.datetime.now())
elif user_input_type == 'budget_est':
try:
budget_est = BillBudgetEstimation.objects.get(bill=bill, estimator=request.user)
except BillBudgetEstimation.DoesNotExist:
budget_est = BillBudgetEstimation(bill=bill, estimator=request.user)
# FIXME: breakage! sanitize!
form = BudgetEstimateForm(bill, request.user, request.POST)
if form.is_valid():
budget_est.one_time_gov = form.cleaned_data['be_one_time_gov']
budget_est.yearly_gov = form.cleaned_data['be_yearly_gov']
budget_est.one_time_ext = form.cleaned_data['be_one_time_ext']
budget_est.yearly_ext = form.cleaned_data['be_yearly_ext']
budget_est.summary = form.cleaned_data['be_summary']
budget_est.save()
else:
return self.get(request, budget_ests_form=form)
# botg = request.POST.get('be_one_time_gov')
# byg = request.POST.get('be_yearly_gov')
# bote = request.POST.get('be_one_time_ext')
# bye = request.POST.get('be_yearly_ext')
# bs = request.POST.get('be_summary')
# budget_est.one_time_gov = int(botg) if botg != "" else None
# budget_est.yearly_gov = int(byg) if byg != "" else None
# budget_est.one_time_ext = int(bote) if bote != "" else None
# budget_est.yearly_ext = int(bye) if bye != "" else None
# budget_est.summary = bs if bs != "" else None
elif user_input_type == 'change_bill_name':
if request.user.has_perm('laws.change_bill') and 'bill_name' in request.POST.keys():
new_title = request.POST.get('bill_name')
new_popular_name = request.POST.get('popular_name')
logger.info('user %d is updating bill %s. new_title=%s, new_popular_name=%s' %
(request.user.id, object_id, new_title,
new_popular_name))
Bill.objects.filter(pk=object_id).update(title=new_title, full_title=new_title,
popular_name=new_popular_name)
else:
return HttpResponseForbidden()
elif user_input_type == 'knesset_proposal':
kp = KnessetProposal.objects.get(pk=request.POST.get('kp_id'))
            if not kp.bill:  # only attach if the proposal has no bill yet
kp.bill = bill
kp.save()
else:
return HttpResponseBadRequest()
return HttpResponseRedirect(".")
_('added-vote-to-bill')
@login_required
def bill_unbind_vote(request, object_id, vote_id):
try:
bill = Bill.objects.get(pk=object_id)
vote = Vote.objects.get(pk=vote_id)
except ObjectDoesNotExist:
raise Http404
if request.method == 'POST': # actually unbind
explanation = request.POST.get('explanation', '')
msg = u'%s is unbinding vote %s from bill %s. explanation: %s' % \
(str(request.user).decode('utf8'),
vote_id,
object_id,
explanation)
notify_responsible_adult(msg)
logger.info(msg)
if vote in bill.pre_votes.all():
bill.pre_votes.remove(vote)
if vote == bill.first_vote:
bill.first_vote = None
if vote == bill.approval_vote:
bill.approval_vote = None
bill.update_stage(force_update=True)
return HttpResponseRedirect(reverse('bill-detail', args=[object_id]))
else: # approve unbind
context = RequestContext(request,
{'object': bill, 'vote': vote})
return render_to_response("laws/bill_unbind_vote.html", context)
class BillListMixin(object):
"""Mixin for using both bill index index and "more" views"""
def get_queryset(self):
member = self.request.GET.get('member', False)
options = {}
if member:
try:
member = int(member)
except ValueError:
raise Http404(_('Invalid member id'))
member = get_object_or_404(Member, pk=member)
qs = member.bills
else:
qs = Bill.objects
form = self._get_filter_form()
if form.is_bound and form.is_valid():
options = form.cleaned_data
return qs.filter_and_order(**options)
def _get_filter_form(self):
form = BillSelectForm(self.request.GET) if self.request.GET \
else BillSelectForm()
return form
class BillListView(BillListMixin, HashnavListView):
friend_pages = [
('stage', 'all', _('All stages')),
]
friend_pages.extend([('stage', x[0], _(x[1])) for x in BILL_STAGE_CHOICES])
bill_stages_names = {
'proposed': _('(Bills) proposed'),
'pre': _('(Bills) passed pre-vote'),
'first': _('(Bills) passed first vote'),
'approved': _('(Bills) approved'),
}
def get_context(self):
context = super(BillListView, self).get_context()
r = [['?%s=%s' % (x[0], x[1]), x[2], False, x[1]] for x in self.friend_pages]
stage = self.request.GET.get('stage', False)
pp_id = self.request.GET.get('pp_id', False)
knesset_booklet = self.request.GET.get('knesset_booklet', False)
gov_booklet = self.request.GET.get('gov_booklet', False)
member = self.request.GET.get('member', False)
if stage and stage != 'all':
for x in r:
if x[3] == stage:
x[2] = True
break
if stage in self.bill_stages_names:
context['stage'] = self.bill_stages_names.get(stage)
context['title'] = _('Bills %(stage)s') % {'stage':
context['stage']}
elif pp_id:
context['title'] = _('Bills based on private proposal with id '
'%s') % pp_id
elif knesset_booklet:
context['title'] = _('Bills published in knesset booklet '
'number %s') % knesset_booklet
elif gov_booklet:
context['title'] = _('Bills published in government booklet '
'number %s') % gov_booklet
else:
# TODO: WTF!?
r[0][2] = True
if member:
context['member'] = get_object_or_404(Member, pk=member)
context['member_url'] = reverse('member-detail', args=[context['member'].id])
if stage in self.bill_stages_names:
context['title'] = _('Bills %(stage)s by %(member)s') % {'stage': self.bill_stages_names[stage],
'member': context['member'].name}
else:
context['title'] = _('Bills by %(member)s') % {'member': context['member'].name}
context['friend_pages'] = r
context['form'] = self._get_filter_form()
context['query_string'] = self.request.META['QUERY_STRING']
return context
class BillMoreView(BillListMixin):
"TODO: Implement me once bills is converted from pagination to get more"
pass
class VoteListView(HashnavListView):
def get_queryset(self, **kwargs):
form = self._get_filter_form()
if form.is_bound and form.is_valid():
options = form.cleaned_data
else:
options = {}
if options.get('exclude_user_agendas', False) and \
self.request.user.is_authenticated():
options['exclude_agendas'] = self.request.user.agendas.all()
return Vote.objects.filter_and_order(**options)
def _get_filter_form(self):
form = VoteSelectForm(self.request.GET) if self.request.GET \
else VoteSelectForm()
return form
def get_context(self):
context = super(VoteListView, self).get_context()
if self.request.user.is_authenticated():
context['watched_members'] = \
self.request.user.get_profile().members
else:
context['watched_members'] = False
context['form'] = self._get_filter_form()
context['query_string'] = self.request.META['QUERY_STRING']
return context
class VoteCsvView(CsvView):
model = Vote
file_path_and_name = ['csv', 'votes.csv']
filename = os.path.join(*file_path_and_name)
list_display = (('title', _('Title')),
('vote_type', _('Vote Type')),
('time', _('Time')),
('votes_count', _('Votes Count')),
('for_votes_count', _('For')),
('against_votes_count', _('Against')),
('against_party', _('Votes Against Party')),
('against_coalition', _('Votes Against Coalition')),
('against_opposition', _('Votes Against Opposition')),
('against_own_bill', _('Votes Against Own Bill')))
def get_queryset(self, **kwargs):
form = VoteSelectForm(self.request.GET or {})
if form.is_bound and form.is_valid():
options = form.cleaned_data
else:
options = {}
return Vote.objects.filter_and_order(**options)
class VoteDetailView(DetailView):
model = Vote
template_resource_name = 'vote'
def get_context_data(self, *args, **kwargs):
context = super(VoteDetailView, self).get_context_data(*args, **kwargs)
vote = context['vote']
related_bills = list(vote.bills_pre_votes.all())
if Bill.objects.filter(approval_vote=vote).count() > 0:
related_bills.append(vote.bill_approved)
if Bill.objects.filter(first_vote=vote).count() > 0:
related_bills.extend(vote.bills_first.all())
for_votes = vote.for_votes().select_related('member', 'member__current_party')
against_votes = vote.against_votes().select_related('member', 'member__current_party')
abstain_votes = vote.abstain_votes().select_related('member', 'member__current_party')
try:
next_v = vote.get_next_by_time()
next_v = next_v.get_absolute_url()
except Vote.DoesNotExist:
next_v = None
try:
prev_v = vote.get_previous_by_time()
prev_v = prev_v.get_absolute_url()
except Vote.DoesNotExist:
prev_v = None
c = {'title': vote.title,
'bills': related_bills,
'for_votes': for_votes,
'against_votes': against_votes,
'abstain_votes': abstain_votes,
'next_v': next_v,
'prev_v': prev_v,
'tags': vote.tags,
}
context.update(c)
# Add bill form
if 'bill_form' in kwargs:
context['bill_form'] = kwargs['bill_form']
else:
context['bill_form'] = AttachBillFromVoteForm(vote)
return context
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
object_id = kwargs['pk']
try:
object_id = int(kwargs['pk'])
except:
return HttpResponseBadRequest()
user_input_type = request.POST.get('user_input_type', None)
vote = get_object_or_404(Vote, pk=object_id)
mk_names = Member.objects.values_list('name', flat=True)
if user_input_type == 'agenda':
try:
agenda_id = int(request.POST.get('agenda'))
except:
return HttpResponseBadRequest()
agenda = Agenda.objects.get(pk=agenda_id)
reasoning = request.POST.get('reasoning', '')
usv = UserSuggestedVote.objects.filter(user=request.user,
agenda=agenda,
vote=vote)
if usv:
usv = usv[0]
usv.reasoning = reasoning
usv.sent_to_editor = False
usv.save()
else:
usv = UserSuggestedVote(user=request.user,
agenda=agenda,
vote=vote,
reasoning=reasoning)
usv.save()
elif user_input_type == 'add-bill':
form = AttachBillFromVoteForm(vote, request.POST)
if form.is_valid():
vote_type = form.cleaned_data['vote_type']
bill = form.cleaned_data['bill_model']
if vote_type == 'approve vote':
bill.approval_vote = vote
elif vote_type == 'first vote':
bill.first_vote = vote
elif vote_type == 'pre vote':
bill.pre_votes.add(vote)
bill.update_stage()
else:
return self.get(request, bill_form=form)
else: # adding an MK (either for or against)
mk_name = difflib.get_close_matches(request.POST.get('mk_name'), mk_names)[0]
mk = Member.objects.get(name=mk_name)
stand = None
if user_input_type == 'mk-for':
stand = 'for'
if user_input_type == 'mk-against':
stand = 'against'
if stand:
va = VoteAction.objects.filter(member=mk, vote=vote)
if va:
va = va[0]
va.type = stand
va.save()
else:
va = VoteAction(member=mk, vote=vote, type=stand)
va.save()
vote.update_vote_properties()
return HttpResponseRedirect('.')
# TODO: Looks like it's unused,
# if so, needs to be removed as it's uses removed function based generic views
#
# def tagged(request,tag):
# title = ugettext_lazy('Votes tagged %(tag)s') % {'tag': tag}
# try:
# return tagged_object_list(request, queryset_or_model = Vote, tag=tag, extra_context={'title':title})
# except Http404:
# return object_list(request, queryset=Vote.objects.none(), extra_context={'title':title})
@require_http_methods(["GET"])
def vote_auto_complete(request):
if not 'query' in request.GET:
raise Http404
options = Vote.objects.filter(title__icontains=request.GET['query'])[:30]
data = []
suggestions = []
for i in options:
formatted_date = i.time.date().strftime('%d/%m/%Y')
title = u'{0} - {1}'.format(formatted_date, i.title)
data.append(i.id)
suggestions.append(title)
result = {'query': request.GET['query'],
'suggestions': suggestions,
'data': data}
return HttpResponse(json.dumps(result), mimetype='application/json')
@require_http_methods(["GET"])
def knesset_proposal_auto_complete(request):
if not request.GET.get('query'):
raise Http404
q = request.GET['query']
if q.isdigit():
options = KnessetProposal.objects.filter(booklet_number=q)[0:30]
else:
options = KnessetProposal.objects.filter(
Q(title__icontains=q) |
Q(law__title__icontains=q))[:30]
data = []
suggestions = []
for i in options:
formatted_date = i.date.strftime('%d/%m/%Y')
title = u'{0} - {1} - {2}'.format(formatted_date, i.law.title, i.title)
data.append(i.id)
suggestions.append(title)
result = {'query': request.GET['query'],
'suggestions': suggestions,
'data': data}
return HttpResponse(json.dumps(result), mimetype='application/json')
def embed_bill_details(request, object_id):
# TODO(shmichael): Only use the last stream item of each type, and if we find
# contradictions, send to human operator for sanitizing.
bill = get_object_or_404(Bill, pk=object_id)
context = RequestContext(request, {'bill': bill})
return render_to_response("laws/embed_bill_detail.html", context)
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.util import Qt
from acq4.analysis.AnalysisModule import AnalysisModule
from collections import OrderedDict
import acq4.pyqtgraph as pg
from acq4.util.metaarray import MetaArray
import numpy as np
class ImageAnalysis(AnalysisModule):
def __init__(self, host):
AnalysisModule.__init__(self, host)
self.background = None
#self.view = pg.GraphicsView()
self.ctrl = Qt.QWidget()
l = Qt.QGridLayout()
self.ctrl.setLayout(l)
self.ctrl.layout = l
#self.loadBgBtn = Qt.QPushButton('load reference')
#l.addWidget(self.loadBgBtn, 0, 0)
self.addRoiBtn = Qt.QPushButton('add ROI')
l.addWidget(self.addRoiBtn, 0, 0)
s = Qt.QSpinBox()
s.setMaximum(10)
s.setMinimum(1)
self.nsegSpin = s
l.addWidget(s, 1, 0)
self.rois = []
self.data = []
## Setup basic GUI
self._elements_ = OrderedDict([
('File Loader', {'type': 'fileInput', 'size': (200, 300), 'host': self, 'showFileTree': False}),
('Image', {'type': 'imageView', 'pos': ('right', 'File Loader'), 'size': (800, 300)}),
('Time Plot', {'type': 'plot', 'pos': ('bottom',), 'size': (800, 300)}),
('Trial Plot', {'type': 'plot', 'pos': ('bottom', 'Time Plot'), 'size': (800, 300)}),
('Line Scan', {'type': 'imageView', 'pos': ('right', 'Time Plot'), 'size': (800, 300)}),
#('Data Table', {'type': 'table', 'pos': ('below', 'Time Plot')}),
('Ctrl', {'type': 'ctrl', 'pos': ('bottom', 'File Loader'), 'size': (200,30), 'object': self.ctrl}),
])
self.initializeElements()
#self.traces = None
self.plot = self.getElement('Time Plot', create=True)
self.plot2 = self.getElement('Trial Plot', create=True)
self.lr = pg.LinearRegionItem([0, 1])
self.plot.addItem(self.lr)
self.view = self.getElement('Image', create=True)
## Add a color scale
## removed for now--seems to be causing crashes :(
#self.colorScale = pg.GradientLegend(self.plot1, (20, 150), (-10, -10))
#self.plot1.scene().addItem(self.colorScale)
## Plots are updated when the selected region changes
self.lr.sigRegionChanged.connect(self.updateAnalysis)
self.addRoiBtn.clicked.connect(self.addRoi)
self.view.sigProcessingChanged.connect(self.processData)
#self.loadBgBtn.clicked.connect(self.loadBg)
def addRoi(self):
if self.nsegSpin.value() == 1:
roi = pg.widgets.LineROI((0,0), (20, 20), 5)
else:
pts = [(i*10,i*10) for i in range(self.nsegSpin.value()+1)]
roi = pg.widgets.MultiLineROI(pts, 5)
self.rois.append(roi)
self.view.addItem(roi)
roi.sigRegionChanged.connect(self.roiChanged)
def roiChanged(self, roi):
if isinstance(roi, int):
roi = self.currentRoi
self.plot.clearPlots()
c = 0
lineScans = []
for imgSet in self.data:
data = roi.getArrayRegion(imgSet['procMean'], self.view.imageItem, axes=(1,2))
m = data.mean(axis=1).mean(axis=1)
lineScans.append(data.mean(axis=2))
spacer = np.empty((lineScans[-1].shape[0], 1), dtype = lineScans[-1].dtype)
spacer[:] = lineScans[-1].min()
lineScans.append(spacer)
data = roi.getArrayRegion(imgSet['procStd'], self.view.imageItem, axes=(1,2))
s = data.mean(axis=1).mean(axis=1)
self.plot.plot(m, pen=pg.hsvColor(c*0.2, 1.0, 1.0))
self.plot.plot(m-s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
self.plot.plot(m+s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
c += 1
lineScan = np.hstack(lineScans)
self.getElement('Line Scan').setImage(lineScan)
self.currentRoi = roi
def processData(self):
self.normData = []
self.data = []
for img in self.rawData:
n = np.empty(img.shape, dtype=img.dtype)
for i in range(img.shape[0]):
n[i] = self.view.normalize(img[i])
self.normData.append(n)
imgSet = {'procMean': n.mean(axis=0), 'procStd': n.std(axis=0)}
self.data.append(imgSet)
def updateAnalysis(self):
roi = self.currentRoi
plot = self.getElement('Trial Plot')
plot.clearPlots()
c = 0
for img in self.normData:
#img = img.mean(axis=1)
rgn = self.lr.getRegion()
img = img[:, rgn[0]:rgn[1]].mean(axis=1)
data = roi.getArrayRegion(img, self.view.imageItem, axes=(1,2))
m = data.mean(axis=1).mean(axis=1)
#data = roi.getArrayRegion(img, self.view.imageItem, axes=(1,2))
#s = data.mean(axis=1).mean(axis=1)
plot.plot(m, pen=pg.hsvColor(c*0.2, 1.0, 1.0))
#self.plot.plot(m-s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
#self.plot.plot(m+s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
c += 1
#if c == 1:
#self.getElement('Line Scan').setImage(data.mean(axis=2))
#if self.traces is None:
#return
#rgn = self.lr.getRegion()
#data = self.traces['Time': rgn[0]:rgn[1]]
#self.plot2.plot(data.mean(axis=1), clear=True)
#self.plot2.plot(data.max(axis=1))
#self.plot2.plot(data.min(axis=1))
def loadFileRequested(self, dh):
"""Called by file loader when a file load is requested."""
if len(dh) != 1:
raise Exception("Can only load one file at a time.")
dh = dh[0]
if dh.isFile():
self.background = dh.read()[np.newaxis,...].astype(float)
self.background /= self.background.max()
return
self.plot.clearPlots()
dirs = dh.subDirs()
images = [[],[],[],[]]
## Iterate over sequence
minFrames = None
for d in dirs:
d = dh[d]
try:
ind = d.info()[('Clamp1', 'amp')]
except:
print(d)
print(d.info())
raise
img = d['Camera/frames.ma'].read()
images[ind].append(img)
if minFrames is None or img.shape[0] < minFrames:
minFrames = img.shape[0]
self.rawData = []
self.data = []
#print "len images: %d " % (len(images))
while len(images) > 0:
imgs = images.pop(0)
img = np.concatenate([i[np.newaxis,:minFrames,...] for i in imgs], axis=0)
self.rawData.append(img.astype(np.float32))
#img /= self.background
## remove bleaching curve from first two axes
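        # ctrlMean has shape (trials, frames); dividing each image stack by the
        # control channel's per-trial and per-frame means removes the slow
        # bleaching trend before ROI analysis.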
ctrlMean = self.rawData[0].mean(axis=2).mean(axis=2)
trialCurve = ctrlMean.mean(axis=1)[:,np.newaxis,np.newaxis,np.newaxis]
timeCurve = ctrlMean.mean(axis=0)[np.newaxis,:,np.newaxis,np.newaxis]
del ctrlMean
for img in self.rawData:
img /= trialCurve
img /= timeCurve
#for img in self.rawData:
#m = img.mean(axis=0)
#s = img.std(axis=0)
#if self.background is not None:
#m = m.astype(np.float32)
#m /= self.background
#s = s.astype(np.float32)
#s /= self.background
#imgSet = {'mean': m, 'std': s}
#self.data.append(imgSet)
#self.imgMeans.append(m)
#self.imgStds.append(s)
self.view.setImage(self.rawData[1].mean(axis=0))
self.processData()
## set up the selection region correctly and prepare IV curves
#if len(dirs) > 0:
#end = cmd.xvals('Time')[-1]
#self.lr.setRegion([end *0.5, end * 0.6])
#self.updateAnalysis()
#info = [
#{'name': 'Command', 'units': cmd.axisUnits(-1), 'values': np.array(values)},
#data.infoCopy('Time'),
#data.infoCopy(-1)]
#self.traces = MetaArray(np.vstack(traces), info=info)
return True
|
|
"""Tests for the Google Assistant integration."""
from asynctest.mock import MagicMock
from homeassistant.components.google_assistant import helpers
def mock_google_config_store(agent_user_ids=None):
"""Fake a storage for google assistant."""
store = MagicMock(spec=helpers.GoogleConfigStore)
if agent_user_ids is not None:
store.agent_user_ids = agent_user_ids
else:
store.agent_user_ids = {}
return store
class MockConfig(helpers.AbstractConfig):
"""Fake config that always exposes everything."""
def __init__(
self,
*,
secure_devices_pin=None,
should_expose=None,
entity_config=None,
hass=None,
local_sdk_webhook_id=None,
local_sdk_user_id=None,
enabled=True,
agent_user_ids=None,
):
"""Initialize config."""
super().__init__(hass)
self._should_expose = should_expose
self._secure_devices_pin = secure_devices_pin
self._entity_config = entity_config or {}
self._local_sdk_webhook_id = local_sdk_webhook_id
self._local_sdk_user_id = local_sdk_user_id
self._enabled = enabled
self._store = mock_google_config_store(agent_user_ids)
@property
def enabled(self):
"""Return if Google is enabled."""
return self._enabled
@property
def secure_devices_pin(self):
"""Return secure devices pin."""
return self._secure_devices_pin
@property
def entity_config(self):
"""Return secure devices pin."""
return self._entity_config
@property
def local_sdk_webhook_id(self):
"""Return local SDK webhook id."""
return self._local_sdk_webhook_id
@property
def local_sdk_user_id(self):
"""Return local SDK webhook id."""
return self._local_sdk_user_id
def should_expose(self, state):
"""Expose it all."""
return self._should_expose is None or self._should_expose(state)
BASIC_CONFIG = MockConfig()
DEMO_DEVICES = [
{
"id": "light.kitchen_lights",
"name": {"name": "Kitchen Lights"},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Brightness",
"action.devices.traits.ColorSetting",
],
"type": "action.devices.types.LIGHT",
"willReportState": False,
},
{
"id": "switch.ac",
"name": {"name": "AC"},
"traits": ["action.devices.traits.OnOff"],
"type": "action.devices.types.SWITCH",
"willReportState": False,
},
{
"id": "switch.decorative_lights",
"name": {"name": "Decorative Lights"},
"traits": ["action.devices.traits.OnOff"],
"type": "action.devices.types.SWITCH",
"willReportState": False,
},
{
"id": "light.ceiling_lights",
"name": {"name": "Roof Lights", "nicknames": ["top lights", "ceiling lights"]},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Brightness",
"action.devices.traits.ColorSetting",
],
"type": "action.devices.types.LIGHT",
"willReportState": False,
},
{
"id": "light.bed_light",
"name": {"name": "Bed Light"},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Brightness",
"action.devices.traits.ColorSetting",
],
"type": "action.devices.types.LIGHT",
"willReportState": False,
},
{
"id": "group.all_lights",
"name": {"name": "all lights"},
"traits": ["action.devices.traits.OnOff"],
"type": "action.devices.types.SWITCH",
"willReportState": False,
},
{
"id": "group.all_switches",
"name": {"name": "all switches"},
"traits": ["action.devices.traits.OnOff"],
"type": "action.devices.types.SWITCH",
"willReportState": False,
},
{
"id": "cover.living_room_window",
"name": {"name": "Living Room Window"},
"traits": ["action.devices.traits.OpenClose"],
"type": "action.devices.types.BLINDS",
"willReportState": False,
},
{
"id": "cover.hall_window",
"name": {"name": "Hall Window"},
"traits": ["action.devices.traits.OpenClose"],
"type": "action.devices.types.BLINDS",
"willReportState": False,
},
{
"id": "cover.garage_door",
"name": {"name": "Garage Door"},
"traits": ["action.devices.traits.OpenClose"],
"type": "action.devices.types.GARAGE",
"willReportState": False,
},
{
"id": "cover.kitchen_window",
"name": {"name": "Kitchen Window"},
"traits": ["action.devices.traits.OpenClose"],
"type": "action.devices.types.BLINDS",
"willReportState": False,
},
{
"id": "group.all_covers",
"name": {"name": "all covers"},
"traits": ["action.devices.traits.OnOff"],
"type": "action.devices.types.SWITCH",
"willReportState": False,
},
{
"id": "media_player.bedroom",
"name": {"name": "Bedroom"},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Volume",
"action.devices.traits.Modes",
],
"type": "action.devices.types.SWITCH",
"willReportState": False,
},
{
"id": "media_player.living_room",
"name": {"name": "Living Room"},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Volume",
"action.devices.traits.Modes",
],
"type": "action.devices.types.SWITCH",
"willReportState": False,
},
{
"id": "media_player.lounge_room",
"name": {"name": "Lounge room"},
"traits": ["action.devices.traits.OnOff", "action.devices.traits.Modes"],
"type": "action.devices.types.SWITCH",
"willReportState": False,
},
{
"id": "media_player.walkman",
"name": {"name": "Walkman"},
"traits": ["action.devices.traits.OnOff", "action.devices.traits.Volume"],
"type": "action.devices.types.SWITCH",
"willReportState": False,
},
{
"id": "fan.living_room_fan",
"name": {"name": "Living Room Fan"},
"traits": ["action.devices.traits.FanSpeed", "action.devices.traits.OnOff"],
"type": "action.devices.types.FAN",
"willReportState": False,
},
{
"id": "fan.ceiling_fan",
"name": {"name": "Ceiling Fan"},
"traits": ["action.devices.traits.FanSpeed", "action.devices.traits.OnOff"],
"type": "action.devices.types.FAN",
"willReportState": False,
},
{
"id": "group.all_fans",
"name": {"name": "all fans"},
"traits": ["action.devices.traits.OnOff"],
"type": "action.devices.types.SWITCH",
"willReportState": False,
},
{
"id": "climate.hvac",
"name": {"name": "Hvac"},
"traits": ["action.devices.traits.TemperatureSetting"],
"type": "action.devices.types.THERMOSTAT",
"willReportState": False,
"attributes": {
"availableThermostatModes": "off,heat,cool,heatcool,auto,dry,fan-only",
"thermostatTemperatureUnit": "C",
},
},
{
"id": "climate.heatpump",
"name": {"name": "HeatPump"},
"traits": ["action.devices.traits.TemperatureSetting"],
"type": "action.devices.types.THERMOSTAT",
"willReportState": False,
},
{
"id": "climate.ecobee",
"name": {"name": "Ecobee"},
"traits": ["action.devices.traits.TemperatureSetting"],
"type": "action.devices.types.THERMOSTAT",
"willReportState": False,
},
{
"id": "lock.front_door",
"name": {"name": "Front Door"},
"traits": ["action.devices.traits.LockUnlock"],
"type": "action.devices.types.LOCK",
"willReportState": False,
},
{
"id": "lock.kitchen_door",
"name": {"name": "Kitchen Door"},
"traits": ["action.devices.traits.LockUnlock"],
"type": "action.devices.types.LOCK",
"willReportState": False,
},
{
"id": "lock.openable_lock",
"name": {"name": "Openable Lock"},
"traits": ["action.devices.traits.LockUnlock"],
"type": "action.devices.types.LOCK",
"willReportState": False,
},
{
"id": "alarm_control_panel.alarm",
"name": {"name": "Alarm"},
"traits": ["action.devices.traits.ArmDisarm"],
"type": "action.devices.types.SECURITYSYSTEM",
"willReportState": False,
},
]
|
|
import asyncio
import json
import os
from os import listdir
from os.path import isfile, join
from discord.ext import commands
from bot import restricted_cmd
# noinspection PyTypeChecker
class Admin:
def __init__(self, bot):
self.bot = bot
def init_cogs(self):
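        # Discover cog modules on disk, load the ones enabled in cogs.json,
        # make sure each has a data directory, and register any cog that is
        # not yet present in the config file.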
cogs_list = self.list_cogs()
cogs_data = self.get_cogs_config()
for cog_file in cogs_list:
cog_already_added = False
if cog_file == "admin" or cog_file == "__init__":
continue
for cog in cogs_data["cogs"]:
if cog["name"] == cog_file:
cog_already_added = True
if cog["enabled"]:
try:
self.bot.load_extension(cog["name"])
self.bot.log_info("Loaded cog " + cog["name"].upper(), __name__)
except Exception as e:
self.bot.log_error(cog["name"] + " " + str(e), __name__)
pass
cog_folder = os.getcwd() + "/data/" + cog_file
try:
os.mkdir(cog_folder)
except FileExistsError:
pass
if not cog_already_added:
try:
self.bot.load_extension(cog_file)
except ImportError:
self.bot.log_error("Failed to load cog " + cog_file.upper(), __name__)
except Exception as e:
self.bot.log_error("Failed to load cog " + cog_file.upper() + str(e), __name__)
try:
self.add_cog(cog_file)
except Exception as e:
self.bot.log_error("" + str(e), __name__)
pass
@commands.group(hidden=True)
@asyncio.coroutine
@restricted_cmd()
def cogs(self, ctx):
"""[R] For configuring cogs"""
if ctx.invoked_subcommand is None:
yield from ctx.send("Invalid sub-command!\n use `?help cogs` to view sub-commands")
@cogs.command()
@asyncio.coroutine
@restricted_cmd()
def enable(self, ctx, cog_name):
if self.does_cog_exist(cog_name):
self.enable_cog(cog_name)
try:
self.bot.load_extension(cog_name)
except Exception as e:
yield from ctx.send(e)
return
yield from ctx.send("Enabled cog: `" + cog_name + "`")
self.bot.log_info("Enabled cog {}".format(cog_name), __name__)
else:
yield from ctx.send("No cog found with name: `" + cog_name + "`")
self.bot.log_info("No cog found with name {}".format(cog_name), __name__)
@cogs.command()
@asyncio.coroutine
@restricted_cmd()
def disable(self, ctx, cog_name):
if self.does_cog_exist(cog_name):
self.disable_cog(cog_name)
try:
self.bot.unload_extension(cog_name)
except Exception as e:
yield from ctx.send(e)
return
yield from ctx.send("Disabled cog: `" + cog_name + "`")
self.bot.log_info("Disabled cog {}".format(cog_name), __name__)
else:
yield from ctx.send("No cog found with name: `" + cog_name + "`")
self.bot.log_info("No cog found with name {}".format(cog_name), __name__)
@cogs.command()
@asyncio.coroutine
def list(self, ctx):
cogs_list = self.list_cogs()
joined_cogs = '` `'.join([str(x) for x in cogs_list])
yield from ctx.send("`" + str(joined_cogs) + "`")
def enable_cog(self, cog_name):
cogs = self.get_cogs_config()
for x in range(0, len(cogs["cogs"])):
if cogs["cogs"][x]["name"] == cog_name:
cogs["cogs"][x]["enabled"] = True
self.bot.log_info("Enabled cog " + cogs["cogs"][x]["name"].upper(), __name__)
self.set_cogs(cogs)
def disable_cog(self, cog_name):
cogs = self.get_cogs_config()
for x in range(0, len(cogs["cogs"])):
if cogs["cogs"][x]["name"] == cog_name:
cogs["cogs"][x]["enabled"] = False
self.bot.log_info("Disabled cog " + cogs["cogs"][x]["name"].upper(), __name__)
self.set_cogs(cogs)
def add_cog(self, cog_name: str):
cogs = self.get_cogs_config()
self.bot.log_info("Adding cog: " + cog_name.upper(), __name__)
found_cog = False
for cog in cogs["cogs"]:
if cog["name"] == cog_name:
found_cog = True
if not found_cog:
new_cog = cogs["blank"]
new_cog["name"] = str(cog_name)
new_cog["enabled"] = True
cogs["cogs"].append(new_cog)
self.bot.log_info("Loaded cog: " + cog_name.upper(), __name__)
self.set_cogs(cogs)
    def remove_cog(self, cog_name):
        cogs = self.get_cogs_config()
        # Rebuild the list without the named cog; "del cog" only removed the
        # loop variable, leaving the stored entry untouched.
        cogs["cogs"] = [cog for cog in cogs["cogs"] if cog["name"] != cog_name]
        self.set_cogs(cogs)
def get_cogs_config(self):
try:
with open(os.getcwd() + "/data/admin/cogs.json", "r") as data_file:
cogs_list = json.load(data_file)
except FileNotFoundError:
self.create_config()
with open(os.getcwd() + "/data/admin/cogs.json", "r") as data_file:
cogs_list = json.load(data_file)
return cogs_list
def set_cogs(self, cog_data):
with open(os.getcwd() + "/data/admin/cogs.json", "w") as game_file:
json.dump(cog_data, game_file, sort_keys=True, indent=4)
def does_cog_exist(self, cog_name):
cogs = self.get_cogs_config()
cog_exists = False
for x in range(0, len(cogs["cogs"])):
if cogs["cogs"][x]["name"] == cog_name:
cog_exists = True
break
return cog_exists
def list_cogs(self):
cogs_dir = os.getcwd() + "/cogs/"
cogs_list = [f for f in listdir(cogs_dir) if isfile(join(cogs_dir, f))]
cogs_list_cleaned = []
for cog in cogs_list:
cog = cog[0:-3]
if cog == "__init__":
pass
else:
cogs_list_cleaned.append(cog)
return cogs_list_cleaned
def create_config(self):
config = {"blank": {"enabled": True, "name": ""}, "cogs": []}
try:
os.mkdir(os.getcwd() + "/data/admin/")
except FileExistsError:
pass
config_file = os.getcwd() + "/data/admin/cogs.json"
with open(config_file, "w") as file:
json.dump(config, file, sort_keys=True, indent=4)
def setup(bot):
admin_cog = Admin(bot)
try:
os.makedirs(os.getcwd() + "/data/")
except FileExistsError:
pass
if not os.path.isfile(os.getcwd() + "/data/admin/cogs.json"):
admin_cog.create_config()
bot.add_cog(admin_cog)
admin_cog.init_cogs()
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'SrgAddrFamilyEnum' : _MetaInfoEnum('SrgAddrFamilyEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg',
{
'ipv4':'ipv4',
'ipv6':'ipv6',
}, 'Cisco-IOS-XR-subscriber-srg-cfg', _yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg']),
'SubscriberRedundancyGroupRoleEnum' : _MetaInfoEnum('SubscriberRedundancyGroupRoleEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg',
{
'master':'master',
'slave':'slave',
}, 'Cisco-IOS-XR-subscriber-srg-cfg', _yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg']),
'SubscriberRedundancyGroupSlaveModeEnum' : _MetaInfoEnum('SubscriberRedundancyGroupSlaveModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg',
{
'warm':'warm',
'hot':'hot',
}, 'Cisco-IOS-XR-subscriber-srg-cfg', _yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg']),
'SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces.Interface' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces.Interface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-subscriber-srg-cfg', True),
_MetaInfoClassMember('interface-id', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Interface Id for the interface
''',
'interface_id',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'interface',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces',
False,
[
_MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces.Interface',
[], [],
''' Interface for this Group
''',
'interface',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges.InterfaceRange' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges.InterfaceRange',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-subscriber-srg-cfg', True),
_MetaInfoClassMember('sub-interface-range-start', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' Sub Interface Start Range
''',
'sub_interface_range_start',
'Cisco-IOS-XR-subscriber-srg-cfg', True),
_MetaInfoClassMember('sub-interface-range-end', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' Sub Interface End Range
''',
'sub_interface_range_end',
'Cisco-IOS-XR-subscriber-srg-cfg', True),
_MetaInfoClassMember('interface-id-range-end', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Interface ID End Range
''',
'interface_id_range_end',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('interface-id-range-start', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Interface ID Start Range
''',
'interface_id_range_start',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'interface-range',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges',
False,
[
_MetaInfoClassMember('interface-range', REFERENCE_LIST, 'InterfaceRange' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges.InterfaceRange',
[], [],
''' Interface for this Group
''',
'interface_range',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'interface-ranges',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.InterfaceList' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.InterfaceList',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable List of Interfaces for this Group.
Deletion of this object also causes deletion
of all associated objects under InterfaceList
.
''',
'enable',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('interface-ranges', REFERENCE_CLASS, 'InterfaceRanges' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges',
[], [],
''' Table of InterfaceRange
''',
'interface_ranges',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces',
[], [],
''' Table of Interface
''',
'interfaces',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'interface-list',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.Peer.Ipaddress' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.Peer.Ipaddress',
False,
[
_MetaInfoClassMember('address-family', REFERENCE_ENUM_CLASS, 'SrgAddrFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SrgAddrFamilyEnum',
[], [],
''' Type of IPv4/IPv6 address
''',
'address_family',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('prefix-string', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IPv4/IPv6 address
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False, [
_MetaInfoClassMember('prefix-string', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4/IPv6 address
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('prefix-string', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv4/IPv6 address
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
]),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'ipaddress',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.Peer' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.Peer',
False,
[
_MetaInfoClassMember('ipaddress', REFERENCE_CLASS, 'Ipaddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.Peer.Ipaddress',
[], [],
''' IPv4 or IPv6 Address of SRG Peer
''',
'ipaddress',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('route-add-disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Set Route add disable
''',
'route_add_disable',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'peer',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.RevertiveTimer' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.RevertiveTimer',
False,
[
_MetaInfoClassMember('max-value', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Value of MAX Revertive Timer
''',
'max_value',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Value of revertive time in minutes
''',
'value',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'revertive-timer',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.VirtualMac' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.VirtualMac',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Virtual MAC Address for this Group
''',
'address',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable Virtual MAC Address for this Group
''',
'disable',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'virtual-mac',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Route' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Route',
False,
[
_MetaInfoClassMember('address-family', REFERENCE_ENUM_CLASS, 'SrgAddrFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SrgAddrFamilyEnum',
[], [],
''' Type of IPv4 address with prefix-length
''',
'address_family',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Prefix of the IP Address
''',
'prefix_length',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('prefix-string', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IPv4 address with prefix-length
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False, [
_MetaInfoClassMember('prefix-string', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address with prefix-length
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('prefix-string', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address with prefix-length
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
]),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Tag value
''',
'value',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'ipv4-route',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6NaRoute' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6NaRoute',
False,
[
_MetaInfoClassMember('address-family', REFERENCE_ENUM_CLASS, 'SrgAddrFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SrgAddrFamilyEnum',
[], [],
''' Type of IPv6 address with prefix-length
''',
'address_family',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Prefix of the IP Address
''',
'prefix_length',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('prefix-string', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IPv6 address with prefix-length
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False, [
_MetaInfoClassMember('prefix-string', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address with prefix-length
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('prefix-string', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address with prefix-length
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
]),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Tag value
''',
'value',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'ipv6na-route',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6PdRoute' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6PdRoute',
False,
[
_MetaInfoClassMember('address-family', REFERENCE_ENUM_CLASS, 'SrgAddrFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SrgAddrFamilyEnum',
[], [],
''' Type of IPv6 address with prefix-length
''',
'address_family',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Prefix of the IP Address
''',
'prefix_length',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('prefix-string', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IPv6 address with prefix-length
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False, [
_MetaInfoClassMember('prefix-string', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address with prefix-length
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('prefix-string', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address with prefix-length
''',
'prefix_string',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
]),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Tag value
''',
'value',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'ipv6pd-route',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route',
False,
[
_MetaInfoClassMember('ipv6na-route', REFERENCE_CLASS, 'Ipv6NaRoute' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6NaRoute',
[], [],
''' None
''',
'ipv6na_route',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('ipv6pd-route', REFERENCE_CLASS, 'Ipv6PdRoute' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6PdRoute',
[], [],
''' None
''',
'ipv6pd_route',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'ipv6-route',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group.StateControlRoute' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group.StateControlRoute',
False,
[
_MetaInfoClassMember('ipv4-route', REFERENCE_CLASS, 'Ipv4Route' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Route',
[], [],
''' None
''',
'ipv4_route',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('ipv6-route', REFERENCE_CLASS, 'Ipv6Route' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route',
[], [],
''' None
''',
'ipv6_route',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'state-control-route',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups.Group' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups.Group',
False,
[
_MetaInfoClassMember('group-id', ATTRIBUTE, 'int' , None, None,
[('1', '500')], [],
''' Group ID
''',
'group_id',
'Cisco-IOS-XR-subscriber-srg-cfg', True),
_MetaInfoClassMember('access-tracking-object', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access Tracking Object for this Group
''',
'access_tracking_object',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('core-tracking-object', ATTRIBUTE, 'str' , None, None,
[], [],
''' Core Tracking Object for this Group
''',
'core_tracking_object',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[], [],
''' Description for this Group
''',
'description',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('disable-tracking-object', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable Tracking Object for this Group
''',
'disable_tracking_object',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Redundancy Group configuration.
Deletion of this object also causes deletion
of all associated objects under Group.
''',
'enable',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('enable-fast-switchover', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable fast switchover for this Group
''',
'enable_fast_switchover',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('hold-timer', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Set hold time (in Minutes)
''',
'hold_timer',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('interface-list', REFERENCE_CLASS, 'InterfaceList' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.InterfaceList',
[], [],
''' List of Interfaces for this Group
''',
'interface_list',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('l2tp-source-ip-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Enter an IP address
''',
'l2tp_source_ip_address',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('peer', REFERENCE_CLASS, 'Peer' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.Peer',
[], [],
''' None
''',
'peer',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('preferred-role', REFERENCE_ENUM_CLASS, 'SubscriberRedundancyGroupRoleEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancyGroupRoleEnum',
[], [],
''' Set preferred role
''',
'preferred_role',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('redundancy-disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable
''',
'redundancy_disable',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('revertive-timer', REFERENCE_CLASS, 'RevertiveTimer' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.RevertiveTimer',
[], [],
''' None
''',
'revertive_timer',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('slave-mode', REFERENCE_ENUM_CLASS, 'SubscriberRedundancyGroupSlaveModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancyGroupSlaveModeEnum',
[], [],
''' Set Slave Mode
''',
'slave_mode',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('state-control-route', REFERENCE_CLASS, 'StateControlRoute' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.StateControlRoute',
[], [],
''' None
''',
'state_control_route',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('virtual-mac', REFERENCE_CLASS, 'VirtualMac' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group.VirtualMac',
[], [],
''' Virtual MAC Address for this Group
''',
'virtual_mac',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'group',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.Groups' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.Groups',
False,
[
_MetaInfoClassMember('group', REFERENCE_LIST, 'Group' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups.Group',
[], [],
''' Redundancy Group configuration
''',
'group',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'groups',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy.RevertiveTimer' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy.RevertiveTimer',
False,
[
_MetaInfoClassMember('max-value', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Value of MAX Revertive Timer
''',
'max_value',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Value of revertive time in minutes
''',
'value',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'revertive-timer',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
'SubscriberRedundancy' : {
'meta_info' : _MetaInfoClass('SubscriberRedundancy',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Subscriber Redundancy configuration.
Deletion of this object also causes deletion of
all associated objects under
SubscriberRedundancy.
''',
'enable',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('groups', REFERENCE_CLASS, 'Groups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.Groups',
[], [],
''' Table of Group
''',
'groups',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('hold-timer', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Set hold time (in Minutes)
''',
'hold_timer',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('preferred-role', REFERENCE_ENUM_CLASS, 'SubscriberRedundancyGroupRoleEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancyGroupRoleEnum',
[], [],
''' Set preferred role
''',
'preferred_role',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('redundancy-disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable
''',
'redundancy_disable',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('revertive-timer', REFERENCE_CLASS, 'RevertiveTimer' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancy.RevertiveTimer',
[], [],
''' None
''',
'revertive_timer',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('slave-mode', REFERENCE_ENUM_CLASS, 'SubscriberRedundancyGroupSlaveModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancyGroupSlaveModeEnum',
[], [],
''' Set slave
''',
'slave_mode',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('source-interface', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Source Interface for Redundancy Peer
Communication
''',
'source_interface',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
_MetaInfoClassMember('virtual-mac-prefix', ATTRIBUTE, 'str' , None, None,
[], [b'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Virtual MAC Prefix for Subscriber Redundancy
''',
'virtual_mac_prefix',
'Cisco-IOS-XR-subscriber-srg-cfg', False),
],
'Cisco-IOS-XR-subscriber-srg-cfg',
'subscriber-redundancy',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-srg-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg'
),
},
}
_meta_table['SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces.Interface']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges.InterfaceRange']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group.InterfaceList']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group.InterfaceList']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.Peer.Ipaddress']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group.Peer']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6NaRoute']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6PdRoute']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Route']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.InterfaceList']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.Peer']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.RevertiveTimer']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.VirtualMac']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups.Group']['meta_info']
_meta_table['SubscriberRedundancy.Groups.Group']['meta_info'].parent =_meta_table['SubscriberRedundancy.Groups']['meta_info']
_meta_table['SubscriberRedundancy.Groups']['meta_info'].parent =_meta_table['SubscriberRedundancy']['meta_info']
_meta_table['SubscriberRedundancy.RevertiveTimer']['meta_info'].parent =_meta_table['SubscriberRedundancy']['meta_info']
|
|
import datetime
import os
import StringIO
import base64
import matplotlib.pyplot as pyplot
import matplotlib.backends.backend_agg as pltagg
import numpy as np
import sys
import plotly.plotly as py
from plotly.graph_objs import Scatter, Data, Layout, XAxis, YAxis, ZAxis
from plotly.graph_objs import Figure, Line, Bar, Scatter3d, Scene, Surface
from plotly.graph_objs import Heatmap, ErrorY
import time
from DBUtils import DBUtils
class chart:
def __init__(self):
self.__pgdb = DBUtils()
# Caches used for some functions.
self.__gen_data_cache = {}
@staticmethod
def __generate_plotly_url(fig, **kwargs):
""" Returns a ready-to-embed URL to a provided fig.
"""
# Sign in, if necessary.
if py.get_credentials()["username"] == "":
py.sign_in("jmoles", os.environ.get('PLOTLY_API_KEY'))
return py.plot(
fig,
auto_open=False,
**kwargs)
@staticmethod
def plotly_single_run_set(run_id, run_info=None):
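        # Build one Plotly line chart per configured chart type (food,
        # moves taken, and move-type counts versus generation), adding a
        # dashed maximum line where applicable, and return ready-to-embed
        # URLs keyed by chart type.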
# Establish a database connection
pgdb = DBUtils()
# Fetch this run's information, if not provided.
if run_info is None:
run_info = pgdb.fetchRunInfo(run_id)[run_id]
# Determine the maximum amount of food and moves possible.
trail_data = pgdb.getTrailData(run_info["trails_id"])[0]
max_food = np.bincount(np.squeeze(np.asarray(trail_data.flatten())))[1]
max_moves = np.array(run_info["moves_limit"])
# Fetch the data on the run and determine number of generations.
gens_data = pgdb.fetchRunGenerations([run_id])[run_id]
num_gens = len(gens_data)
x = np.linspace(0, num_gens - 1, num=num_gens)
# Settings used for plotting.
chart_set_config = {
"food" : {
"db_key" : "food",
"stats" : ["max", "min", "avg", "std"],
"title" : "Food vs. Generations for Run ID {0}",
"type" : Scatter,
"plot-mode" : "lines",
"xaxis" : "Generations",
"yaxis" : "Food Consumed",
"max-line" : max_food,
"max-title" : "Available"
},
"moves-taken" : {
"db_key" : "moves",
"stats" : ["max", "min", "avg", "std"],
"title" : "Moves Taken vs. Generations for Run ID {0}",
"type" : Scatter,
"plot-mode" : "lines",
"xaxis" : "Generations",
"yaxis" : "Moves Taken",
"max-line" : max_moves,
"max-title" : "Limit"
},
"moves-dir" : {
"db_key" : "moves",
"stats" : ["left", "right", "forward", "none"],
"title" : "Move Types vs. Generations for Run ID {0}",
"type" : Scatter,
"plot-mode" : "lines",
"xaxis" : "Generations",
"yaxis" : "Move Type",
"max-line" : None,
}
}
plot_urls = {}
# TODO: Could multithread here to speed things up.
for chart_type, settings in chart_set_config.items():
traces_list = []
# Go through each of the stats and build the traces.
for stat in settings["stats"]:
data_set = np.zeros((num_gens))
for curr_gen in range(0, num_gens):
data_set[curr_gen] = (
gens_data[curr_gen]
[settings["db_key"]][stat])
this_trace = settings["type"](
x=x,
y=data_set,
mode=settings["plot-mode"],
name=stat.title()
)
traces_list.append(this_trace)
# If desired, add the maximum line.
if settings["max-line"] is not None:
y_val = np.empty(len(x))
y_val.fill(settings["max-line"])
traces_list.append(
settings["type"](
x=x,
y=y_val,
mode="lines",
line={
"dash" : "dash"
},
name=settings["max-title"].title()
)
)
layout = Layout(
title=settings["title"].format(run_id),
xaxis=XAxis(
title=settings["xaxis"].format(run_id)
),
yaxis=YAxis(
title=settings["yaxis"].format(run_id)
),
)
fig = Figure(data=Data(traces_list), layout=layout)
# Generate the URL.
plot_urls[chart_type] = chart.__generate_plotly_url(fig,
filename="apigen/{0}_{1}".format(chart_type, run_id),
fileopt='overwrite',)
return plot_urls
@staticmethod
def sweep_charts(db_data, config_id, config_info, sweep_type, x_label,
y_label=None):
""" Given a set of db_data from
DBUtils.fetch_run_config_sweep_by_network along with the config_id,
and maximum amount of food, generates a food and moves taken sweep
plot.
Returns ready to embed URLs.
"""
plot_urls = {}
is_3d = False # Determines if plot is 3d
# Determine how to label the axes.
if sweep_type == "selection":
# Grab the x-axis labels for this plot.
x_label_vals = [y[3] for y in [
db_data[x][0] for x in db_data]]
else:
x_label_vals = sorted(db_data.keys())
chart_set_config = {
"food" : {
"title" : "Food vs. {0} Sweep".format(x_label),
"db-idx" : 0,
"val-func" : [max, np.average],
"plot-mode" : "lines",
"xaxis" : x_label.title(),
"yaxis" : "Food Consumed",
"max-line" : config_info["max_food"],
"max-title" : "Available",
"label" : ["max", "mean", "std"]
},
"moves-taken" : {
"title" : "Moves Taken vs. {0} Sweep".format(x_label),
"db-idx" : 1,
"val-func" : [min, np.average],
"plot-mode" : "lines",
"xaxis" : x_label.title(),
"yaxis" : "Moves Taken",
"label" : ["min", "mean", "std"]
},
"num-runs" : {
"title" : "Number of runs",
"db-idx" : 1,
"val-func" : [len],
"plot-mode" : "lines",
"xaxis" : x_label.title(),
"yaxis" : "Moves Taken",
"label" : ["min", "mean", "std"]
},
}
# Add the max line for moves if not "moves_limit" type.
if sweep_type != "moves_limit":
chart_set_config["moves-taken"]["max-line"] = (
config_info["moves_limit"])
chart_set_config["moves-taken"]["max-title"] = "Limit"
if (sweep_type == "p_mutate_crossover" or
sweep_type == "dl_length_hidden"):
for curr_key in chart_set_config.keys():
chart_set_config[curr_key]["xaxis"] = x_label
chart_set_config[curr_key]["yaxis"] = y_label
chart_set_config[curr_key]["type"] = Heatmap
if curr_key == "food":
chart_set_config[curr_key]["zaxis"] = "Food Consumed"
chart_set_config[curr_key]["title"] = "Food 3D Sweep"
chart_set_config[curr_key]["val-func"] = [max]
elif curr_key == "moves-taken":
chart_set_config[curr_key]["zaxis"] = "Food Consumed"
chart_set_config[curr_key]["title"] = "Moves Taken 3D Sweep"
chart_set_config[curr_key]["val-func"] = [min]
elif curr_key == "num-runs":
chart_set_config[curr_key]["zaxis"] = "Number of Runs"
if sweep_type == "p_mutate_crossover":
step_size = 0.1
else:
step_size = 1.0
chart_set_config[curr_key]["step-size"] = step_size
is_3d = True
# TODO: Could multithread here to speed things up.
for chart_type, settings in chart_set_config.items():
traces_list = []
for idx, this_func in enumerate(settings["val-func"]):
x_vals = []
y_vals = []
z_vals = []
y_std_dev = []
if is_3d:
y_vals = sorted(db_data.keys())
# Need to find the length of x and min/max x to
# figure out the labels and empty spots on heat chart.
len_y = len(y_vals)
x_vals = []
for cy in y_vals:
curr_x = sorted(db_data[cy].keys())
x_vals.extend(curr_x)
x_vals = list(set(x_vals))
x_vals.sort()
y_vals = list(np.around(
np.arange(
start=min(y_vals),
stop=max(y_vals) + settings["step-size"],
step=settings["step-size"]),
decimals=4))
x_vals = list(np.around(
np.arange(
start=min(x_vals),
stop=max(x_vals) + settings["step-size"],
step=settings["step-size"]),
decimals=4))
# Go through all of the y/x values and fill in z.
for cy in y_vals:
this_z = dict.fromkeys(x_vals)
if cy in db_data:
for cx in sorted(db_data[cy].keys()):
this_z[cx] = this_func(
[x[settings["db-idx"]] for x in db_data[cy][cx]])
this_z = [myz[1] for myz in sorted(this_z.items())]
z_vals.append(this_z)
this_trace = settings["type"](
x=x_vals,
y=y_vals,
z=z_vals,
name=settings["label"][idx].title()
)
else:
for curr_x in sorted(db_data.keys()):
y_vals.append(this_func(
[x[settings["db-idx"]] for x in db_data[curr_x]]))
if this_func == np.average:
y_std_dev.append(np.std(
[x[settings["db-idx"]] for x in db_data[curr_x]]))
if this_func == np.average:
this_trace = Scatter(
x=x_label_vals,
y=y_vals,
mode=settings["plot-mode"],
name=settings["label"][idx].title(),
error_y=ErrorY(
type='data',
array=y_std_dev,
visible=True,
)
)
else:
this_trace = Scatter(
x=x_label_vals,
y=y_vals,
mode=settings["plot-mode"],
name=settings["label"][idx].title()
)
traces_list.append(this_trace)
# If desired, add the maximum line.
if "max-line" in settings and not is_3d:
y_val = np.empty(len(x_label_vals))
y_val.fill(settings["max-line"])
traces_list.append(
Scatter(
x=x_label_vals,
y=y_val,
mode="lines",
line={
"dash" : "dash"
},
name=settings["max-title"].title()
)
)
layout = Layout(
title=settings["title"],
xaxis=XAxis(title=settings["xaxis"]),
yaxis=YAxis(title=settings["yaxis"]),
)
fig = Figure(data=Data(traces_list), layout=layout)
# Generate the URL.
plot_urls[chart_type] = chart.__generate_plotly_url(fig,
filename="apigen/sweep_{0}_{1}_{2}".format(
''.join(e for e in x_label if e.isalnum()),
config_id,
chart_type),
fileopt="overwrite")
return plot_urls
def line_by_config_id(self, config_id, ext="png", stat_group="food",
stat=None, show_title=True):
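        # Average the requested statistic(s) across every run that used this
        # configuration, plot them against generation with matplotlib, and
        # return the rendered image buffer along with the number of runs.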
        if stat_group == "moves_stats" and stat is None:
            stat = ["left", "right", "forward", "none"]
        elif stat is None:
            stat = ["min", "max", "avg"]
# Get the list of run_ids with this configuration.
run_ids_l = self.__pgdb.getRunsWithConfigID(config_id)
# Generate the figure and axes common to all of these.
fig = pyplot.Figure()
axis = fig.add_subplot(1,1,1)
# Get information on the run
run_info = self.__pgdb.fetchConfigInfo(config_id)
max_food = run_info["max_food"]
# Find the network name, trail name, and number of generations.
net_name = run_info["network_name"]
trail_name = run_info["trail_name"]
num_gens = run_info["generations"]
max_moves = np.array(run_info["moves_limit"])
# Take each run and now fetch data for each.
ids_search_l = []
        for curr_id in run_ids_l:
            if curr_id not in self.__gen_data_cache:
                ids_search_l.append(curr_id)
        if len(ids_search_l) > 0:
            self.__gen_data_cache.update(
                self.__pgdb.fetchRunGenerations(ids_search_l))
gens_data = self.__gen_data_cache
x = np.linspace(0, num_gens - 1, num=num_gens)
for curr_stat in stat:
data_set = np.zeros((num_gens))
for curr_gen in range(0, num_gens):
if stat_group == "moves_stats":
curr_stat_group = "moves"
else:
curr_stat_group = stat_group
this_gen = []
for curr_run in run_ids_l:
if curr_gen in gens_data[curr_run]:
this_gen.append(gens_data[curr_run][curr_gen]
[curr_stat_group][curr_stat])
else:
this_gen.append(None)
data_set[curr_gen] = np.mean(
filter(lambda a: a is not None, this_gen))
axis.plot(x, data_set, '-', label=curr_stat.title())
if show_title:
plot_title = (
"Mean - {0} - {1} g{2}/p{3}".format(
net_name,
trail_name,
num_gens,
run_info["population"]))
axis.set_title(plot_title)
# Determine the maximum type to show.
if stat_group == "food":
axis.plot(x, np.repeat(np.array(max_food), num_gens), 'r--')
axis.axis((0, num_gens, 0, max_food + 5))
axis.set_ylabel("Food Consumed")
axis.set_xlabel("Generations")
axis.legend(loc="best")
elif stat_group == "moves":
axis.plot(x, np.repeat(
np.array(max_moves),
num_gens), 'r--')
axis.axis((0, num_gens, 0, max_moves + 5))
axis.set_ylabel("Moves Taken")
axis.set_xlabel("Generations")
axis.legend(loc="lower left")
elif stat_group == "moves_stats":
axis.axis((0, num_gens, 0, max_moves + 5))
axis.set_ylabel("Moves Taken")
axis.set_xlabel("Generations")
axis.legend(loc="upper left", ncol=2)
fig.set_facecolor('w')
return (self.__createImage(fig, ext), len(run_ids_l))
def __createImage(self, fig, ext="jpg"):
""" Takes a matplotlib fig and generates given ext type.
Returns
"""
canvas = pltagg.FigureCanvasAgg(fig)
output = StringIO.StringIO()
if ext == "tif" or ext == "tiff":
canvas.print_tif(output)
elif ext == "bmp":
canvas.print_bmp(output)
elif ext == "eps":
canvas.print_eps(output)
elif ext == "png":
canvas.print_png(output)
elif ext == "pdf":
canvas.print_pdf(output)
elif ext == "svg":
canvas.print_svg(output)
else:
canvas.print_jpg(output)
return output
|
|
import re
import math
import glob
import pickle
import sys
from Peptide import *
from XLink import *
from Match import *
from MZXMLReader import *
from FastaReader import *
from random import shuffle
def logisticEval(b, x):
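    # Evaluate a logistic regression model: prepend an implicit bias feature
    # of 1 to x and return sigmoid(b . x).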
tmp = x
x = [1]
x.extend(tmp)
# b = [1].extend(b)
val = 0
# print '%d\t%d\n' % (len(b), len(x))
for i in range(len(b)):
val += b[i] * x[i]
return float(1) / (1 + math.exp(-val))
def adjustPrior(prior_t, p, iter):
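    # Iteratively re-estimate the prior probability of a correct match:
    # rescale each posterior (computed under the training prior prior_t) to
    # the current prior with Bayes' rule, then update the prior to the mean
    # of the rescaled posteriors. Returns the posteriors regrouped to match
    # the nesting of p.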
posterior_t = []
for unit in p:
posterior_t.extend(unit)
N = len(posterior_t)
prior = prior_t
posterior = [0.0] * N
for i in range(iter):
for k in range(N):
denom = (float(prior) / prior_t) * posterior_t[k] + (float(1 - prior) / (1 - prior_t)) * (1 - posterior_t[k])
if denom == 0:
print 'denom == 0'
posterior[k] = float(prior * posterior_t[k]) / (prior_t * denom)
prior_prev = prior
prior = sum(posterior) / len(posterior)
# print prior
newp = []
b = 0
for i in range(len(p)):
e = b + len(p[i])
newp.append(posterior[b : e])
b = e
return newp
def adjustPriorMarginal(prior_t, p, marginal, iter):
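    # Same re-estimation as adjustPrior, but the prior update weights each
    # posterior by its marginal probability, and iteration stops early once
    # the relative change in the prior drops below 1%.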
posterior_t = []
for unit in p:
posterior_t.extend(unit)
N = len(posterior_t)
prior = prior_t
posterior = [0.0] * N
# marginal = [1.0] * N
for i in range(iter):
for k in range(N):
denom = (float(prior) / prior_t) * posterior_t[k] + (float(1 - prior) / (1 - prior_t)) * (1 - posterior_t[k])
if denom == 0:
print 'denom == 0'
posterior[k] = float(prior * posterior_t[k]) / (prior_t * denom)
prior_prev = prior
# prior = sum(posterior) / len(posterior)
prior = 0.0
for j in range(N):
prior = prior + posterior[j] * marginal[j]
prior = prior / sum(marginal)
print 'iteration = %d, %.10f' % (i, prior)
if i > 0 and abs(float(prior_prev - prior) / prior_prev) <= 0.01:
print '%f' % abs(float(prior_prev - prior) / prior_prev)
break
newp = []
b = 0
for i in range(len(p)):
e = b + len(p[i])
newp.append(posterior[b : e])
b = e
return newp
def getMarginal(p21, p11, p12, p22):
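    # For each candidate, solve the coupled equations
    #   alphaT = p12 + (p11 - p12) * betaT
    #   betaT  = p22 + (p21 - p22) * alphaT
    # giving the marginal probabilities that each peptide of the pair is a
    # true identification (alphaT, betaT) and their complements (alphaF, betaF).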
alphaT = []
betaT = []
alphaF = []
betaF = []
for i in range(len(p21)):
for j in range(len(p21[i])):
denom = 1.0 - (p11[i][j] - p12[i][j]) * (p21[i][j] - p22[i][j])
at = (p12[i][j] + p22[i][j] * (p11[i][j] - p12[i][j])) / denom
bt = (p22[i][j] + p12[i][j] * (p21[i][j] - p22[i][j])) / denom
af = 1.0 - at
bf = 1.0 - bt
alphaT.append(at)
betaT.append(bt)
alphaF.append(af)
betaF.append(bf)
return [alphaT, betaT, alphaF, betaF]
def getMatchesPerSpectrum(mass, param, index, title):
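    # Enumerate every candidate peptide pair indexed for this spectrum, place
    # the cross-linker on each pair of linkable residues (xresidue positions,
    # plus the N-terminus when ntermxlink is enabled), score the resulting
    # XLink against the spectrum, and return the match-info records.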
spectraDict = index.spectraDict
uniquePepObjs = index.uniquePepObjs[0]
precMassPepIndexTuple = index.precMassPepIndexTuple
searchIndex = index.searchIndex
xresidue = param['xresidue']
indexList = searchIndex[title]
spectrum = spectraDict[title]
matches = []
XL = []
for i in indexList:
index1 = precMassPepIndexTuple[1][i]
index2 = precMassPepIndexTuple[2][i]
pepObj1 = uniquePepObjs[index1]
pepObj2 = uniquePepObjs[index2]
pepSorted = sorted([pepObj1, pepObj2], key = lambda x : x.sequence)
pepObj1 = pepSorted[0]
pepObj2 = pepSorted[1]
charge = spectraDict[title].ch
mz = spectraDict[title].mz
it = spectraDict[title].it
kPos1 = []
kPos2 = []
if param['ntermxlink'] == True:
if pepObj1.isNterm == True:
kPos1.append(0)
if pepObj2.isNterm == True:
kPos2.append(0)
pepseq1 = pepObj1.sequence
kPos1.extend(list(zip(*filter(lambda x : x[1] == xresidue, enumerate(pepseq1[:-1])))[0]))
pepseq2 = pepObj2.sequence
kPos2.extend(list(zip(*filter(lambda x : x[1] == xresidue, enumerate(pepseq2[:-1])))[0]))
for p1 in kPos1:
for p2 in kPos2:
positions = [p1, p2]
xl = XLink(pepObj1, pepObj2, positions, charge, mass, param); XL.append(xl)
match = Match(spectrum, xl, mass, param)
match.match(mass)
matches.append(match.getMatchInfo(index))
return matches
def getPepObjsFromProtein(header, sequence, patternString, mass, param):
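    # In-silico digestion: cleave after K or R unless followed by P, allow up
    # to 'missedsites' missed cleavages, keep peptides within the configured
    # length limits that match patternString, and build Peptide objects,
    # adding a variably modified copy for each modRes residue when configured.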
missedSites = param['missedsites']
minLen = param['minlength']
maxLen = param['maxlength']
modRes = param['modRes']
pattern = re.compile(patternString)
sites = [0]
for i in range(len(sequence)):
if i == len(sequence) - 1:
sites.append(i + 1)
elif (sequence[i] == 'K' or sequence[i] == 'R') and sequence[i + 1] != 'P':
sites.append(i + 1)
peptides = []
for i in range(len(sites)):
if i < len(sites) - missedSites - 1:
for j in range(missedSites + 1):
seq = sequence[sites[i] : sites[i + j + 1]]
if len(seq) >= minLen and len(seq) <= maxLen and pattern.match(seq):
peptides.append(seq)
else:
for j in range(i + 1, len(sites)):
seq = sequence[sites[i] : sites[j]]
if len(seq) >= minLen and len(seq) <= maxLen and pattern.match(seq):
peptides.append(seq)
peptides = list(set(peptides))
pepObjs = []
for pep in peptides:
modification = dict(position=[], deltaMass=[])
isNterm = False
if pep == sequence[:len(pep)]:
isNterm = True
pepObjs.append(Peptide(pep, header, modification, mass, isNterm))
if len(modRes) != 0:
modMass = param['modMass']
index = [i for i, ltr in enumerate(pep) if ltr == modRes]
if len(index) != 0:
pep = list(pep)
for i in index:
modification = dict(position=[], deltaMass=[])
modification['position'].append(i)
modification['deltaMass'].append(modMass)
pep[i] = pep[i].lower()
pep = ''.join(pep)
pepObjs.append(Peptide(pep, header, modification, mass, isNterm))
return pepObjs
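# readParam: builds the search configuration.  Defaults are defined inline and then
# overridden from a tab-separated "name<TAB>value" parameter file (empty lines and
# lines starting with '#' are ignored).  Also returns the table of monoisotopic
# residue/ion masses, with the fixed modification mass folded into the
# corresponding residue.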
def readParam(filename):
param = dict(
useAIon=True,
verbose=False,
chargePreXlinkIons=[1, 3],
chargePostXlinkIons=[2, 5],
basepeakint = 100.0,
dynamicrange = 0.001,
missedsites = 2,
minlength = 4,
maxlength = 51,
modRes = '',
modMass = 0.0,
linkermass = 136.10005,
ms1tol = dict(measure='ppm', val=5),
ms2tol = dict(measure='da', val=0.01),
minmz = 200,
maxmz = 2000,
mode = 'conservative',
xresidue = 'K',
# patternstring = '^[ACDEFGHIKLMNPQRSTVWY]*K[ACDEFGHIKLMNPQRSTVWY]+$',
aa = 'ACDEFGHIKLMNPQRSTVWY',
neutralloss=dict(
h2oLoss=dict(
mass=-18.010565,
aa=set('ACDEFGHIKLMNPQRSTVWY')),
nh3Loss=dict(
mass=-17.026549,
aa=set('ACDEFGHIKLMNPQRSTVWY')),
h2oGain=dict(
mass=18.010565,
aa=set('ACDEFGHIKLMNPQRSTVWY'))),
model_TT_TF = [0.0] * 17,
model_TF_FF = [0.0] * 17,
nTT = 169,
nTF = 8568,
nFF = 91242)
mass = dict(
A=71.037114,
R=156.101111,
N=114.042927,
D=115.026943,
C=103.009184,
E=129.042593,
Q=128.058578,
G=57.021464,
H=137.058912,
I=113.084064,
L=113.084064,
K=128.094963,
M=131.040485,
F=147.068414,
P=97.052764,
S=87.032028,
T=101.047678,
W=186.079313,
Y=163.063329,
V=99.068414,
Hatom=1.007825032,
Oatom=15.99491462,
neutronmass = 1.008701,
BIonRes=1.0078246,
AIonRes=-26.9870904,
YIonRes=19.0183888,
isotopeInc = [1.008701/4, 1.008701/3, 1.008701/2, 1.008701/1])
f = open(filename)
lines = f.readlines()
for l in lines:
l = l[:-1]
columns= l.split('\t')
if len(l) == 0 or l[0] == '#' or len(columns) < 2:
continue
name = columns[0]
val = columns[1]
if name == 'database':
param['database'] = val
elif name == 'MS_data_directory':
param['msdata'] = val
elif name == 'XLresidue':
param['xresidue'] = val
elif name == 'ms1tol_unit':
param['ms1tol']['measure'] = val
elif name == 'ms1tol_val':
param['ms1tol']['val'] = int(val)
elif name == 'ms2tol_unit':
param['ms2tol']['measure'] = val
elif name == 'ms2tol_val':
param['ms2tol']['val'] = float(val)
elif name == 'linker_mass':
param['linkermass'] = float(val)
elif name == 'miss_cleave':
param['missedsites'] = int(val)
elif name == 'include_a_ions':
param['useAIon'] = True if val == 'True' else False
elif name == 'min_peplen':
param['minlength'] = int(val)
elif name == 'max_peplen':
param['maxlength'] = int(val)
elif name == 'fix_mod_res':
param['fix_mod_res'] = val
elif name == 'fix_mod_mass':
param['fix_mod_mass'] = float(val)
elif name == 'var_mod_res':
param['modRes'] = val
elif name == 'var_mod_mass':
param['modMass'] = float(val)
elif name == 'min_preXL_ions_ch':
param['chargePreXlinkIons'][0] = int(val)
elif name == 'max_preXL_ions_ch':
param['chargePreXlinkIons'][1] = int(val)
elif name == 'min_postXL_ions_ch':
param['chargePostXlinkIons'][0] = int(val)
elif name == 'max_postXL_ions_ch':
param['chargePostXlinkIons'][1] = int(val)
elif name == 'target_database':
param['target_database'] = val
elif name =='uniprot_database':
param['uniprot_database'] = val
elif name == 'max_iterations':
param['MAX_ITERATIONS'] = int(val)
elif name == 'annotate_spec':
param['annotation'] = True if val == 'True' else False
elif name == 'ntermxlink':
param['ntermxlink'] = True if val == 'True' else False
elif len(name) >= 4 and name[:2] == 'CI':
if len(name) == 4:
s = int(name[2:])
param['model_TT_TF'][s] = float(val)
elif len(name) == 5:
s = int(name[3:])
param['model_TF_FF'][s] = float(val)
elif name == 'nTT':
param['nTT'] = int(val)
elif name == 'nTF':
param['nTF'] = int(val)
elif name == 'nFF':
param['nFF'] = int(val)
param['patternstring'] = '^[' + param['aa'] + ']*' + param['xresidue'] + '[' + param['aa'] + ']+$'
param['prior_t_TT_TF'] = float(param['nTT']) / (param['nTT'] + param['nTF'])
param['prior_t_TF_FF'] = float(param['nTF']) / (param['nTF'] + param['nFF'])
f.close()
    # Apply the fixed modification only if one was configured in the parameter file
    if 'fix_mod_res' in param and 'fix_mod_mass' in param:
        mass[param['fix_mod_res']] += param['fix_mod_mass']
return [param, mass]
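# readSpectra: loads every *.mzXML file in the directory and keeps spectra whose
# retention time falls within the first 110 minutes and whose precursor charge is
# between 2 and 7.  A spectrum is then accepted into the returned title->spectrum
# dictionary only if at least 25 peak matches are found within 0.01 m/z with an
# intensity ratio between 0.3 and 1/0.3 (roughly, a quality filter on peak count).
# The commented-out block at the end shows an alternative that returns deisotoped
# spectra instead.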
def readSpectra(directory, param, mass):
files = glob.glob(directory + '*.mzXML')
specdict = dict()
total = []
for filename in files:
reader = MZXMLReader(filename)
spectra = reader.getSpectraList(mass, param)
total.append(spectra)
ss = []
for i in range(len(total)):
ss.append(set())
tmp = []
for j in range(len(total[i])):
if total[i][j].rt >= 0 and total[i][j].rt <= 110*60 and total[i][j].ch >= 2 and total[i][j].ch <= 7:
tmp.append(total[i][j])
tmp = sorted(tmp, key = lambda s : s.mr)
tolerance = 0.01
lower_ratio = 0.3
upper_ratio = 1 / float(lower_ratio)
for j in range(len(tmp) - 1):
MZ = []
IT = []
mz = tmp[j].mz
it = tmp[j].it
lastindex = 0
ik = 0
jk = 0
for ik in range(len(mz)):
if lastindex == 0:
jk = 0
else:
jk = lastindex
while not (lastindex > len(mz) - 1 or jk > len(mz) - 1 or ik > len(mz) - 1 or mz[jk] > mz[ik] + tolerance):
if mz[jk] <= mz[ik] - tolerance:
lastindex = jk
ratio = float(it[ik]) / float(it[jk])
if abs(mz[ik] - mz[jk]) <= tolerance and ratio >= lower_ratio and ratio <= upper_ratio:
MZ.append(mz[ik])
IT.append(it[ik])
jk = jk + 1
if len(MZ) >= 25:
specdict[tmp[j].title] = tmp[j]
return specdict
# deisotope spectra
# deisotoped = dict()
# titles = specdict.keys()
# for i in range(len(titles)):
# title = titles[i]
# (one, align) = specdict[title].deisotope(mass, 4, 0.02)
# deisotoped[title] = one
# return deisotoped
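# getTophits: rescoring and ranking stage.  Each candidate of each spectrum is
# scored with the two logistic models (TT-vs-TF and TF-vs-FF) on its feature vector
# and on the peptide-swapped version, the resulting probabilities are adjusted to
# the data-set priors via getMarginal/adjustPriorMarginal, and a combined score
# (mean of the two per-peptide probabilities) is attached to every candidate.
# Candidates are sorted per spectrum, spectra are sorted by their best candidate,
# and one top hit per spectrum is returned as
# [peptides, positions, proteins, charge, score, scan].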
def getTophits(index, result):
model_TT_TF = index.param['model_TT_TF']
model_TF_FF = index.param['model_TF_FF']
prior_t_TT_TF = index.param['prior_t_TT_TF']
prior_t_TF_FF = index.param['prior_t_TF_FF']
it = index.param['MAX_ITERATIONS']
p21 = []
p11 = []
p12 = []
p22 = []
for i in range(len(result)):
print i
# title = result[i][0]
# ch = str(title.split('.')[-1])
p21.append([])
p11.append([])
p12.append([])
p22.append([])
# S = []
for j in range(len(result[i][1])):
# pep1 = index.uniquePepObjs[0][result[i][1][j][0][0]]
# pep2 = index.uniquePepObjs[0][result[i][1][j][0][1]]
# positions = result[i][1][j][1]
feature = result[i][1][j][2]
x = list(feature[0])
x.extend(feature[1])
xflip = list(feature[1])
xflip.extend(feature[0])
# s = [pep1.sequence, pep2.sequence, str(positions[0] + 1), str(positions[1] + 1)]
# s = '_'.join(s)
# s = s + '_' + ch + '_' + title
# S.append(s)
b = model_TT_TF
p21[-1].append(logisticEval(b, x))
p11[-1].append(logisticEval(b, xflip))
b = model_TF_FF
p12[-1].append(logisticEval(b, x))
p22[-1].append(logisticEval(b, xflip))
[alphaT, betaT, alphaF, betaF] = getMarginal(p21, p11, p12, p22)
p21 = adjustPriorMarginal(prior_t_TT_TF, p21, alphaT, it)
p11 = adjustPriorMarginal(prior_t_TT_TF, p11, betaT, it)
p12 = adjustPriorMarginal(prior_t_TF_FF, p12, betaF, it)
p22 = adjustPriorMarginal(prior_t_TF_FF, p22, alphaF, it)
for i in range(len(result)):
print i
result[i] = list(result[i])
for j in range(len(result[i][1])):
pep1 = index.uniquePepObjs[0][result[i][1][j][0][0]]
pep2 = index.uniquePepObjs[0][result[i][1][j][0][1]]
ap21 = p21[i][j]
ap11 = p11[i][j]
ap12 = p12[i][j]
ap22 = p22[i][j]
denom = 1 - (ap11 - ap12) * (ap21 - ap22)
            alpha_T = (ap12 + ap22 * (ap11 - ap12)) / denom
            beta_T = (ap22 + ap12 * (ap21 - ap22)) / denom
            prob1 = ap11 * beta_T
            prob2 = ap21 * alpha_T
            score = (prob1 + prob2) / float(2)
            info = {'alpha' : alpha_T, 'beta' : beta_T, 'prob' : [prob1, prob2], 'score' : score}
result[i][1][j] = list(result[i][1][j])
result[i][1][j].append(info)
for r in result:
r[1] = sorted(r[1], key = lambda x : x[3]['score'], reverse = True)
result = sorted(result, key = lambda x : x[1][0][3]['score'], reverse = True)
tophits = []
for r in result:
scan = r[0]
pep = [index.uniquePepObjs[0][r[1][0][0][0]].sequence, index.uniquePepObjs[0][r[1][0][0][1]].sequence]
pos = [int(r[1][0][1][0]), int(r[1][0][1][1])]
pro = [index.uniquePepObjs[0][r[1][0][0][0]].proteinID, index.uniquePepObjs[0][r[1][0][0][1]].proteinID]
ch = int(scan.split('.')[-1])
score = r[1][0][3]['score']
tophits.append([pep, pos, pro, ch, score, scan])
return tophits
def write_results(output_file, tophits):
f = open(output_file, 'w')
f.write('Rank\tPep_alpha\tPep_beta\tSite_alpha\tSite_beta\tPro_alpha\tPro_beta\tCharge\tpr(alpha=T,beta=T)\tSpectrum\n')
for i in range(len(tophits)):
f.write('%d\t' % (i + 1))
f.write('%s\t%s\t' % (tophits[i][0][0], tophits[i][0][1]))
f.write('%d\t%d\t' % (tophits[i][1][0], tophits[i][1][1]))
f.write('%s\t%s\t' % (','.join(tophits[i][2][0]), ','.join(tophits[i][2][1])))
f.write('%d\t' % tophits[i][3])
f.write('%E\t' % tophits[i][4])
f.write('%s\n' % tophits[i][5])
f.close()
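# get_true_true: heuristic selection of confident true-true matches used for model
# training.  A candidate is kept when both peptides explain a reasonable share of
# the matched ions and intensity (the hard-coded 0.20 / 0.1 thresholds below) and
# the two peptides share a protein group label; among the survivors for a spectrum
# the candidate with the largest summed matched intensity wins.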
def get_true_true(result, index, param, mass):
TT = []
for i in range(len(result)):
scan = result[i][0]
spectrum = index.spectraDict[scan]
charge = spectrum.ch
cand = []
sumint = []
for j in range(len(result[i][1])):
pepObj1 = index.uniquePepObjs[0][result[i][1][j][0][0]]
pepObj2 = index.uniquePepObjs[0][result[i][1][j][0][1]]
SL = [set(), set()]
positions = result[i][1][j][1]
for pro in pepObj1.proteinID:
cols = pro.split('|R')
SL[0].add(cols[1][0])
for pro in pepObj2.proteinID:
cols = pro.split('|R')
SL[1].add(cols[1][0])
feature = list(result[i][1][j][2][0])
feature.extend(result[i][1][j][2][1])
if feature[0] / float(feature[7]) >= 0.20 and feature[1] / float(feature[7]) >= 0.20 and feature[8] / float(feature[15]) >= 0.20 and feature[9] / float(feature[15]) >= 0.20 and feature[2] >= 0.1 and feature[10] >= 0.1 and len(SL[0].intersection(SL[1])) > 0:
xl = XLink(pepObj1, pepObj2, positions, charge, mass, param)
match = Match(spectrum, xl, mass, param)
match.match(mass)
cand.append(match)
sumint.append(feature[2] + feature[10])
if len(cand) == 0:
continue
comb = zip(cand, sumint)
cand = list(zip(*sorted(comb, key = lambda x : x[1], reverse = True))[0])
sumint = list(zip(*sorted(comb, key = lambda x : x[1], reverse = True))[1])
TT.append(cand[0])
for i in range(len(TT)):
pepObj1 = TT[i].xlink.pepObjs[0]
pepObj2 = TT[i].xlink.pepObjs[1]
s = pepObj1.sequence + '\t' + pepObj2.sequence + '\t' + ','.join(pepObj1.proteinID) + '\t' + ','.join(pepObj2.proteinID)
print s
return TT
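# get_true_false: builds true-false decoy matches.  Each confident true-true match
# is re-paired: one of its peptides is kept and the other is replaced by a peptide
# digested from the 'uniprot_database' (MOUSE entries only) whose mass keeps the
# cross-link within the precursor tolerance; matches passing a minimal ion-coverage
# filter are collected separately depending on whether the alpha (A) or beta (B)
# peptide is the one kept.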
def get_true_false(TrueTrue, param, mass):
linkermass = param['linkermass']
ttseq = set()
m = []
for match in TrueTrue:
ttseq.add(match.xlink.pepObjs[0].sequence)
ttseq.add(match.xlink.pepObjs[1].sequence)
m.append(match.xlink.mr - linkermass)
m = max(m)
fasta = FastaReader(param['uniprot_database']).readFasta()
patternString = param['patternstring']
peptides = dict()
for header, sequence in fasta:
if 'MOUSE' in header:
pepObjInPro = getPepObjsFromProtein(header, sequence, patternString, mass, param)
for pep in pepObjInPro:
if pep.sequence not in peptides and pep.sequence not in ttseq and pep.pm < m:
peptides[pep.sequence] = pep
peptides = peptides.values()
TrueFalse = []
A = []
B = []
for i in range(len(TrueTrue)):
print i
match = TrueTrue[i]
ch = match.spectrum.ch
tol = match.xlink.mr * 5 * (10 ** (-6))
A.append([])
B.append([])
pepObjs = match.xlink.pepObjs
for j in range(len(peptides)):
if abs(pepObjs[0].pm + peptides[j].pm + linkermass - match.xlink.mr) <= tol:
pepseq1 = pepObjs[0].sequence
pepseq2 = peptides[j].sequence
kPos1 = list(zip(*filter(lambda x : x[1] == param['xresidue'], enumerate(pepseq1[:-1])))[0])
kPos2 = list(zip(*filter(lambda x : x[1] == param['xresidue'], enumerate(pepseq2[:-1])))[0])
positions = [kPos1[len(kPos1)/2], kPos2[len(kPos2)/2]]
xl = XLink(pepObjs[0], peptides[j], positions, ch, mass, param)
tf = Match(match.spectrum, xl, mass, param)
tf.match(mass)
feature = tf.feature
if feature[1][0] / float(feature[1][7]) + feature[1][1] / float(feature[1][7]) >= 0.2:
A[-1].append(tf)
for j in range(len(peptides)):
if abs(pepObjs[1].pm + peptides[j].pm + linkermass - match.xlink.mr) <= tol:
pepseq1 = pepObjs[1].sequence
pepseq2 = peptides[j].sequence
kPos1 = list(zip(*filter(lambda x : x[1] == param['xresidue'], enumerate(pepseq1[:-1])))[0])
kPos2 = list(zip(*filter(lambda x : x[1] == param['xresidue'], enumerate(pepseq2[:-1])))[0])
positions = [kPos1[len(kPos1)/2], kPos2[len(kPos2)/2]]
xl = XLink(pepObjs[1], peptides[j], positions, ch, mass, param)
tf = Match(match.spectrum, xl, mass, param)
tf.match(mass)
feature = tf.feature
                if feature[1][0] / float(feature[1][7]) + feature[1][1] / float(feature[1][7]) >= 0.2:
B[-1].append(tf)
AB = [A, B]
return AB
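# get_false_false: builds false-false decoy matches by pairing two peptides digested
# from YEAST entries of the 'uniprot_database' whose summed mass (plus the linker)
# matches a true-true precursor within tolerance; random sampling of the
# integer-mass buckets keeps the combinatorics manageable.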
def get_false_false(TrueTrue, param, mass):
linkermass = param['linkermass']
ttseq = set()
m = []
for match in TrueTrue:
ttseq.add(match.xlink.pepObjs[0].sequence)
ttseq.add(match.xlink.pepObjs[1].sequence)
m.append(match.xlink.mr - linkermass)
mi = int(min(m) - 0.2)
ma = int(max(m) + 0.2)
fasta = FastaReader(param['uniprot_database']).readFasta()
patternString = param['patternstring']
peptides = dict()
for header, sequence in fasta:
if 'YEAST' in header:
pepObjInPro = getPepObjsFromProtein(header, sequence, patternString, mass, param)
for pep in pepObjInPro:
if pep.sequence not in peptides and pep.sequence not in ttseq:
peptides[pep.sequence] = pep
peptides = peptides.values()
intdict = dict()
for peptide in peptides:
num = int(peptide.pm)
if num > ma:
continue
if num not in intdict:
intdict[num] = [peptide]
else:
intdict[num].append(peptide)
FalseFalse = []
for k in range(len(TrueTrue)):
match = TrueTrue[k]
print k
sys.stdout.flush()
FalseFalse.append([])
pm = match.xlink.mr - linkermass
ch = match.spectrum.ch
tol = match.xlink.mr * 3 * (10 ** (-6))
mlist = range(500, ma - 500)
shuffle(mlist)
mlist = mlist[:25]
for m in mlist:
if m not in intdict:
continue
shuffle(intdict[m])
intdict[m] = intdict[m][:50]
for i in range(len(intdict[m])):
num = int(pm - intdict[m][i].pm)
if num not in intdict:
continue
shuffle(intdict[num])
intdict[num] = intdict[num][:50]
for j in range(len(intdict[num])):
pepseq1 = intdict[m][i].sequence
pepseq2 = intdict[num][j].sequence
kPos1 = list(zip(*filter(lambda x : x[1] == param['xresidue'], enumerate(pepseq1[:-1])))[0])
kPos2 = list(zip(*filter(lambda x : x[1] == param['xresidue'], enumerate(pepseq2[:-1])))[0])
positions = [kPos1[len(kPos1)/2], kPos2[len(kPos2)/2]]
xl = XLink(intdict[m][i], intdict[num][j], positions, ch, mass, param)
if abs(match.xlink.mr - xl.mr) <= tol:
ff = Match(match.spectrum, xl, mass, param)
ff.match(mass)
feature = ff.feature
if feature[0][0] / float(feature[0][7]) + feature[0][1] / float(feature[0][7]) >= 0.15 and feature[1][0] / float(feature[1][7]) + feature[1][1] / float(feature[1][7]) >= 0.15:
FalseFalse[-1].append(ff)
return FalseFalse
def calculate_feature(tt, AB, FF):
# tt
Xtt = []
for i in range(len(tt)):
feature = list(tt[i].feature[0])
feature.extend(tt[i].feature[1])
Xtt.append(feature)
# tf
A = AB[0]
B = AB[1]
AB = []
for a in A:
AB.extend(a)
for b in B:
AB.extend(b)
Xtf = []
for i in range(len(AB)):
feature = list(AB[i].feature[0])
feature.extend(AB[i].feature[1])
Xtf.append(feature)
# ff
ff = []
for f in FF:
ff.extend(f)
Xff = []
for i in range(len(ff)):
feature = list(ff[i].feature[0])
feature.extend(ff[i].feature[1])
Xff.append(feature)
return [Xtt, Xtf, Xff]
|
|
# -----------------------------------------------------------------------------
# QP/Python Library
#
# Port of Miro Samek's Quantum Framework to Python. The implementation takes
# the liberty to depart from Miro Samek's code where the specifics of desktop
# systems (compared to embedded systems) seem to warrant a different approach.
#
# Reference:
# Practical Statecharts in C/C++; Quantum Programming for Embedded Systems
# Author: Miro Samek, Ph.D.
# http://www.state-machine.com/
#
# -----------------------------------------------------------------------------
#
# Copyright (C) 2008-2014, Autolabel AB
# All rights reserved
# Author(s): Henrik Bohre (henrik.bohre@autolabel.se)
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Neither the name of Autolabel AB, nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
"""Python GTK port of the Quantum Calculator example
Debian package dependencies:
python-glade2
python-gtk2
"""
# Standard
import sys
import os.path
import logging
# External
import gtk
import gtk.glade
# Local - Expects us to be two levels below the library
sys.path.insert(0, os.path.join('..', '..'))
import qp
import qptest
# System wide signal definitions
[
C_SIG,
CE_SIG,
DIGIT_0_SIG,
DIGIT_1_9_SIG,
POINT_SIG,
OPER_SIG,
EQUALS_SIG,
TERMINATE_SIG,
IGNORE_SIG
] = range(qp.USER_SIG, qp.USER_SIG + 9)
KEY_UNKNOWN = '?'
KEY_PLUS = '+'
KEY_MINUS = '-'
KEY_MULT = '*'
KEY_DIVIDE = '/'
# Setup logging
if sys.version_info[:2] < (2, 5): # funcName variable not supported
logging_format = "%(levelname)s %(name)s: %(message)s"
else:
logging_format = "%(levelname)s %(name)s.%(funcName)s: %(message)s"
logging.basicConfig(level=logging.DEBUG, format=logging_format)
class QCalcGui(object):
"""GTK GUI"""
logger = logging.getLogger('QCalcGui')
def __init__(self, engine):
self.engine = engine # Calculator engine
self.engine.init()
# Instantiate gui from glade file
self.widgets = gtk.glade.XML('qcalc.glade')
# Connect widget and handlers
signals = {
'button_0': (DIGIT_0_SIG, '0'),
'button_1': (DIGIT_1_9_SIG, '1'),
'button_2': (DIGIT_1_9_SIG, '2'),
'button_3': (DIGIT_1_9_SIG, '3'),
'button_4': (DIGIT_1_9_SIG, '4'),
'button_5': (DIGIT_1_9_SIG, '5'),
'button_6': (DIGIT_1_9_SIG, '6'),
'button_7': (DIGIT_1_9_SIG, '7'),
'button_8': (DIGIT_1_9_SIG, '8'),
'button_9': (DIGIT_1_9_SIG, '9'),
'button_c': (C_SIG, None),
'button_ce': (CE_SIG, None),
'button_point': (POINT_SIG, '.'),
'button_plus': (OPER_SIG, KEY_PLUS),
'button_minus': (OPER_SIG, KEY_MINUS),
'button_divide': (OPER_SIG, KEY_DIVIDE),
'button_mult': (OPER_SIG, KEY_MULT),
'button_equals': (EQUALS_SIG, None),
}
for name, (sig, key) in signals.iteritems():
widget = self.widgets.get_widget(name)
widget.connect('clicked', self.button_clicked, sig, key)
top = self.widgets.get_widget('top')
top.connect('destroy', gtk.main_quit)
top.show_all()
def button_clicked(self, button, sig, key):
# Main driver of state machine events
# Create event from button signal handler
e = qp.Event(sig)
e.key = key
self.engine.dispatch(e)
text = self.engine.get_display()
self.widgets.get_widget('display').set_label(text)
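# QCalc is the calculator engine, implemented as a hierarchical state machine on
# top of qp.Hsm.  Every state is an ordinary method taking the current event: it
# returns 0 when the event is handled (possibly after calling self.TRAN() to
# request a transition) and returns its parent state's handler otherwise, which is
# how the state hierarchy (on -> ready/operand1/op_entered/operand2 -> ...) is
# expressed.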
class QCalc(qp.Hsm):
signals = [
C_SIG,
CE_SIG,
DIGIT_0_SIG,
DIGIT_1_9_SIG,
POINT_SIG,
OPER_SIG,
EQUALS_SIG,
TERMINATE_SIG,
IGNORE_SIG,
]
def __init__(self, disp_width=14):
qp.Hsm.__init__(self, self.__class__.initial)
self.logger = logging.getLogger('QCalc')
self.disp_width = disp_width
self.display_ = ' ' * self.disp_width
self.operand1_ = 0
self.operand2_ = 0
self.op_key_ = KEY_UNKNOWN
def initial(self, e):
self.clear()
self.INIT(self.__class__.on)
def on(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.INIT_SIG:
self.logger.info('init/')
self.INIT(self.__class__.ready)
return 0
elif e.sig == C_SIG:
self.clear()
self.TRAN(self.__class__.on)
return 0
return qp.Hsm.top
def ready(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.INIT_SIG:
self.INIT(self.__class__.begin)
return 0
elif e.sig == DIGIT_0_SIG:
self.clear()
self.TRAN(self.__class__.zero1)
return 0
elif e.sig == DIGIT_1_9_SIG:
self.clear()
self.insert(e.key)
self.TRAN(self.__class__.int1)
return 0
elif e.sig == POINT_SIG:
self.clear()
self.insert('0')
self.insert('.')
self.TRAN(self.__class__.frac1)
return 0
elif e.sig == OPER_SIG:
self.operand1_ = eval(self.display_)
self.op_key_ = e.key
self.TRAN(self.__class__.op_entered)
return 0
return self.__class__.on
def result(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
return self.__class__.ready
def begin(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig == OPER_SIG:
if e.key == KEY_MINUS:
self.TRAN(self.__class__.negated1)
return 0
return self.__class__.ready
def negated1(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
self.negate()
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig == OPER_SIG:
if e.key == KEY_MINUS:
self.logger.warning('ignored')
return 0
elif e.sig == CE_SIG:
self.clear()
self.TRAN(self.__class__.begin)
return 0
elif e.sig == DIGIT_0_SIG:
self.insert(e.key)
self.TRAN(self.__class__.zero1)
return 0
elif e.sig == DIGIT_1_9_SIG:
self.insert(e.key)
self.TRAN(self.__class__.int1)
return 0
elif e.sig == POINT_SIG:
self.insert(e.key)
self.TRAN(self.__class__.frac1)
return 0
return self.__class__.on
def operand1(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig == CE_SIG:
self.clear()
self.TRAN(self.__class__.begin)
return 0
elif e.sig == OPER_SIG:
self.operand1_ = eval(self.display_)
self.op_key_ = e.key
self.TRAN(self.__class__.op_entered)
return 0
return self.__class__.on
def zero1(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig == DIGIT_0_SIG:
self.logger.warning('ignored')
return 0
elif e.sig == DIGIT_1_9_SIG:
self.insert(e.key)
self.TRAN(self.__class__.int1)
return 0
elif e.sig == POINT_SIG:
self.insert(e.key)
self.TRAN(self.__class__.frac1)
return 0
return self.__class__.operand1
def int1(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig in [DIGIT_0_SIG, DIGIT_1_9_SIG]:
self.insert(e.key)
return 0
elif e.sig == POINT_SIG:
self.insert(e.key)
self.TRAN(self.__class__.frac1)
return 0
return self.__class__.operand1
def frac1(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig in [DIGIT_0_SIG, DIGIT_1_9_SIG]:
self.insert(e.key)
return 0
elif e.sig == POINT_SIG:
self.logger.warning('ignored')
return 0
return self.__class__.operand1
def error(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
return self.__class__.on
def op_entered(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig == OPER_SIG:
if e.key == KEY_MINUS:
self.clear()
self.TRAN(self.__class__.negated2)
return 0
elif e.sig == DIGIT_0_SIG:
self.clear()
self.TRAN(self.__class__.zero2)
return 0
elif e.sig == DIGIT_1_9_SIG:
self.clear()
self.insert(e.key)
self.TRAN(self.__class__.int2)
return 0
elif e.sig == POINT_SIG:
self.clear()
self.insert('0')
self.insert('.')
self.TRAN(self.__class__.frac2)
return 0
return self.__class__.on
def negated2(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
self.negate()
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig == OPER_SIG:
if e.key == KEY_MINUS:
self.logger.warning('ignored')
return 0
elif e.sig == CE_SIG:
self.TRAN(self.__class__.op_entered)
return 0
elif e.sig == DIGIT_0_SIG:
self.TRAN(self.__class__.zero2)
return 0
elif e.sig == DIGIT_1_9_SIG:
self.insert(e.key)
self.TRAN(self.__class__.int2)
return 0
elif e.sig == POINT_SIG:
self.insert(e.key)
self.TRAN(self.__class__.frac2)
return 0
return self.__class__.on
def operand2(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig == CE_SIG:
self.clear()
self.TRAN(self.__class__.op_entered)
return 0
elif e.sig == OPER_SIG:
self.operand2_ = eval(self.display_)
if self.eval():
self.operand1_ = eval(self.display_)
self.op_key_ = e.key
self.TRAN(self.__class__.op_entered)
else:
self.TRAN(self.__class__.error)
return 0
elif e.sig == EQUALS_SIG:
self.operand2_ = eval(self.display_)
if self.eval():
self.TRAN(self.__class__.result)
else:
self.TRAN(self.__class__.error)
return 0
return self.__class__.on
def zero2(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig == DIGIT_0_SIG:
self.logger.warning('ignored')
return 0
elif e.sig == DIGIT_1_9_SIG:
self.insert(e.key)
self.TRAN(self.__class__.int2)
return 0
elif e.sig == POINT_SIG:
self.insert(e.key)
self.TRAN(self.__class__.frac2)
return 0
return self.__class__.operand2
def int2(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig in [DIGIT_0_SIG, DIGIT_1_9_SIG]:
self.insert(e.key)
return 0
elif e.sig == POINT_SIG:
self.insert(e.key)
self.TRAN(self.__class__.frac2)
return 0
return self.__class__.operand2
def frac2(self, e):
if e.sig == qp.ENTRY_SIG:
self.logger.info('entry/')
return 0
elif e.sig == qp.EXIT_SIG:
self.logger.info('exit/')
return 0
elif e.sig in [DIGIT_0_SIG, DIGIT_1_9_SIG]:
self.insert(e.key)
return 0
elif e.sig == POINT_SIG:
self.logger.warning('ignored')
return 0
return self.__class__.operand2
# Non-state methods
def clear(self):
self.display_ = ' ' * (self.disp_width - 1) + '0'
self.len_ = 0
def insert(self, key):
if self.len_ == 0:
self.display_ = self.display_[0:-1] + key
self.len_ += 1
elif self.len_ < self.disp_width - 1:
self.display_ = self.display_[1:] + key
self.len_ += 1
else:
self.logger.warning('Overflow')
def negate(self):
self.clear()
self.display_ = self.display_[0:-2] + '-' + self.display_[-1]
def eval(self):
ok = True
result = 0
if self.op_key_ == KEY_PLUS:
result = self.operand1_ + self.operand2_
elif self.op_key_ == KEY_MINUS:
result = self.operand1_ - self.operand2_
elif self.op_key_ == KEY_MULT:
result = self.operand1_ * self.operand2_
elif self.op_key_ == KEY_DIVIDE:
if abs(self.operand2_) > 1e-10:
result = float(self.operand1_) / float(self.operand2_)
else:
self.display_ = "Error 0"
ok = False
else:
assert False
if ok:
if abs(result) < 1.0e10:
self.display_ = '%14.11g' % result
else:
self.display_ = 'Error 1'
ok = False
return ok
def get_display(self):
return self.display_
class TestQCalc(qptest.HsmTestCase):
def setUp(self):
self.dut = QCalc()
self.dut.init()
def test_that_clear_leaves_display_with_single_zero(self):
# Given a QCalc engine with disp_width = 14
self.dut.display_ = 'XXX'
# When calling clear
self.dut.clear()
# Then
self.assertEqual(' 0', self.dut.display_)
def test_that_clear_sets_len_to_zero(self):
# Given a QCalc engine with disp_width = 14
self.dut.len_ = 12
# When calling clear
self.dut.clear()
# Then
self.assertEqual(0, self.dut.len_)
def test_that_insert_increases_len_when_two_less_than_disp_width(self):
# Given a QCalc engine with disp_width = 12
engine = QCalc(disp_width=12)
engine.len_ = 10
# When calling insert
engine.insert('0')
# Then
self.assertEqual(11, engine.len_)
def test_transition_from_begin_to_int1(self):
# Given a QCalc engine in begin
self.dut.TRAN(QCalc.begin)
empty = qp.Event(qp.qep._QEP_EMPTY_SIG)
self.dut.dispatch(empty)
# When pressing a numeric button
button = qp.Event(DIGIT_1_9_SIG)
button.key = '1'
self.dut.dispatch(button)
# Then
self.assertTrue(self.dut.is_in(QCalc.int1))
def test_transition_from_int1_to_frac1(self):
# Given a QCalc engine in int1
self.dut.TRAN(QCalc.int1)
empty = qp.Event(qp.qep._QEP_EMPTY_SIG)
self.dut.dispatch(empty)
# When pressing the point button
button = qp.Event(POINT_SIG)
button.key = '.'
self.dut.dispatch(button)
# Then
self.assertTrue(self.dut.is_in(QCalc.frac1))
if __name__ == '__main__':
engine = QCalc()
app = QCalcGui(engine)
gtk.main()
|
|
import time
import os
import traceback
import datetime
import codecs
from opsbro.library import libstore
from opsbro.module import HandlerModule
from opsbro.parameters import BoolParameter, StringParameter, StringListParameter
class MailHandlerModule(HandlerModule):
implement = 'mail'
parameters = {
'enabled' : BoolParameter(default=False),
'severities' : StringListParameter(default=['ok', 'warning', 'critical', 'unknown']),
'contacts' : StringListParameter(default=['naparuba@gmail.com']),
'addr_from' : StringParameter(default='opsbro@mydomain.com'),
'smtp_server' : StringParameter(default='localhost'),
'smtps' : BoolParameter(default=False),
'check_subject_template' : StringParameter(default='mail-check-subject.tpl'),
'check_text_template' : StringParameter(default='mail-check-text.tpl'),
'group_subject_template' : StringParameter(default='mail-group-subject.tpl'),
'group_text_template' : StringParameter(default='mail-group-text.tpl'),
'compliance_subject_template': StringParameter(default='mail-compliance-subject.tpl'),
'compliance_text_template' : StringParameter(default='mail-compliance-text.tpl'),
}
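    # Overall flow: handle() receives daemon events and, when the module is enabled
    # and the object really changed state, renders a Jinja2 subject/text template
    # pair (loaded once from the pack's templates/ directory, file names taken from
    # the *_template parameters above) and mails the result to the configured
    # contacts through the configured SMTP server.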
def __init__(self):
super(MailHandlerModule, self).__init__()
self.jinja2 = libstore.get_jinja2()
self.smtplib = None
# Check templates, to load them only once
self.__computed_templates = {'check' : {'subject': None, 'text': None},
'group' : {'subject': None, 'text': None},
'compliance': {'subject': None, 'text': None},
}
def __send_email(self, addr_from, msg, about_what):
# Lazy load smtplib
if self.smtplib is None:
import smtplib
self.smtplib = smtplib
smtp_server = self.get_parameter("smtp_server")
smtps = self.get_parameter("smtps")
contacts = self.get_parameter('contacts')
try:
self.logger.debug("Handler: MAIL connection to %s" % smtp_server)
s = self.smtplib.SMTP(smtp_server, timeout=30)
r = s.sendmail(addr_from, contacts, msg.as_string())
s.quit()
self.logger.info('Did send an email to %d contacts (%s) about %s' % (len(contacts), ','.join(contacts), about_what))
except Exception:
self.logger.error('Cannot send mail: %s' % traceback.format_exc())
def __get_msg(self, addr_from, subject_m, text_m):
from email.mime.text import MIMEText
from email.header import Header
msg = MIMEText(text_m, 'plain', 'utf-8')
msg['From'] = addr_from
msg['Subject'] = Header(subject_m, 'utf-8')
return msg
def __get_computed_template(self, for_what, which_template):
what_entry = self.__computed_templates[for_what]
return what_entry[which_template]
def __load_and_compute_one_template(self, for_what, which_template):
templates_dir = os.path.join(self.pack_directory, 'templates')
pth = self.get_parameter('%s_%s_template' % (for_what, which_template))
full_pth = os.path.join(templates_dir, pth)
if not os.path.exists(full_pth):
self.logger.error('Missing template file %s_%s_template: %s' % (for_what, which_template, full_pth))
return False
try:
with codecs.open(full_pth, 'r', 'utf8') as f:
buf = f.read()
except Exception as exp:
self.logger.error('Cannot load template file %s_%s_template (%s) : %s' % (for_what, which_template, full_pth, exp))
return False
try:
tpl = self.jinja2.Template(buf)
except Exception as exp:
            self.logger.error('The template %s_%s_template (%s) raised an error while parsing: %s' % (for_what, which_template, full_pth, exp))
return False
# Ok we can save it
what_entry = self.__computed_templates[for_what]
what_entry[which_template] = tpl
return True
def __compute_templates(self, for_what):
# Maybe it's already computed
subject_tpl = self.__get_computed_template(for_what, 'subject')
text_tpl = self.__get_computed_template(for_what, 'text')
if subject_tpl is not None and text_tpl is not None:
return True
success = True
success &= self.__load_and_compute_one_template(for_what, 'subject')
success &= self.__load_and_compute_one_template(for_what, 'text')
subject_tpl = self.__get_computed_template(for_what, 'subject')
text_tpl = self.__get_computed_template(for_what, 'text')
return subject_tpl is not None and text_tpl is not None
def send_mail_check(self, check):
have_templates = self.__compute_templates('check')
if not have_templates:
            self.logger.error('We do not have templates available, skipping the email sending')
return
subject_tpl = self.__get_computed_template('check', 'subject')
text_tpl = self.__get_computed_template('check', 'text')
try:
_time = datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d %H:%M:%S')
subject_m = subject_tpl.render(check=check, _time=_time)
text_m = text_tpl.render(check=check, _time=_time)
addr_from = self.get_parameter('addr_from')
msg = self.__get_msg(addr_from, subject_m, text_m)
self.__send_email(addr_from, msg, 'check state change')
except:
self.logger.error('Cannot send mail for check: %s' % traceback.format_exc())
def send_mail_group(self, group, group_modification):
have_templates = self.__compute_templates('group')
if not have_templates:
            self.logger.error('We do not have templates available, skipping the email sending')
return
subject_tpl = self.__get_computed_template('group', 'subject')
text_tpl = self.__get_computed_template('group', 'text')
try:
_time = datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d %H:%M:%S')
subject_m = subject_tpl.render(group=group, group_modification=group_modification)
text_m = text_tpl.render(group=group, group_modification=group_modification)
addr_from = self.get_parameter('addr_from')
msg = self.__get_msg(addr_from, subject_m, text_m)
self.__send_email(addr_from, msg, 'group modification')
except:
self.logger.error('Cannot send mail for group modification: %s' % traceback.format_exc())
def send_mail_compliance(self, compliance):
have_templates = self.__compute_templates('compliance')
if not have_templates:
            self.logger.error('We do not have templates available, skipping the email sending')
return
subject_tpl = self.__get_computed_template('compliance', 'subject')
text_tpl = self.__get_computed_template('compliance', 'text')
try:
_time = datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d %H:%M:%S')
subject_m = subject_tpl.render(compliance=compliance, _time=_time)
text_m = text_tpl.render(compliance=compliance, _time=_time)
addr_from = self.get_parameter('addr_from')
msg = self.__get_msg(addr_from, subject_m, text_m)
self.__send_email(addr_from, msg, 'compliance rule state change')
except:
self.logger.error('Cannot send mail for compliance modification: %s' % traceback.format_exc())
def handle(self, obj, event):
enabled = self.get_parameter('enabled')
if not enabled:
self.logger.debug('Mail module is not enabled, skipping check alert sent')
return
self.logger.debug('Manage an obj event: %s (event=%s)' % (obj, event))
evt_type = event['evt_type']
# Checks: only notify about changes
if evt_type == 'check_execution':
evt_data = event['evt_data']
check_did_change = evt_data['check_did_change']
if check_did_change:
self.send_mail_check(obj)
# We are launched only if the group did change
if evt_type == 'group_change':
evt_data = event['evt_data']
group_modification = evt_data['modification']
self.send_mail_group(obj, group_modification)
# Compliance: only when change, and only some switch cases should be
# notify (drop useless changes)
if evt_type == 'compliance_execution':
evt_data = event['evt_data']
compliance_did_change = evt_data['compliance_did_change']
if compliance_did_change:
self.send_mail_compliance(obj)
|
|
from copy import deepcopy
from tmd.pwscf.build import nscf_ks, wannier_num_bands
import tmd.pwscf.cell as cell
def Header(nbnd, num_wann, soc):
lines = ["num_bands = {}".format(str(nbnd))]
lines.append("num_wann = {}".format(str(num_wann)))
lines.append("num_iter = 0")
# TODO - why is this conditional in SKfit?
# Ambiguity in defining up/down basis?
#if not soc:
# lines.append("hr_plot = true")
lines.append("hr_plot = true")
lines.append("")
return lines
def Disentanglement():
# TODO - is it possible to set reasonable defaults for:
# dis_win_min, dis_win_max, dis_froz_min, dis_froz_max?
# Leave with Wannier90 defaults for now: no inner window;
# outer window covers all evs.
lines = ["#TODO - set disentanglement window."]
lines.append("dis_win_min = 0.0d0")
lines.append("dis_win_max = 10.0d0")
lines.append("dis_froz_min = 0.0d0")
lines.append("dis_froz_max = 0.0d0")
lines.append("dis_num_iter = 10000")
lines.append("dis_mix_ratio = 0.5")
lines.append("")
return lines
def Update_Disentanglement(win_path, E_Fermi, outer, inner):
vals = {"dis_win_min": E_Fermi + outer[0],
"dis_win_max": E_Fermi + outer[1],
"dis_froz_min": E_Fermi + inner[0],
"dis_froz_max": E_Fermi + inner[1]}
with open(win_path, 'r') as fp:
lines = fp.readlines()
updated = []
for line in lines:
if line.startswith("#TODO - set disentanglement window."):
continue
new_line = line
for k, v in vals.items():
if line.startswith(k):
new_line = "{} = {}\n".format(k, v)
break
updated.append(new_line)
with open(win_path, 'w') as fp:
fp.write("".join(updated))
def Projections(latpos, soc, valence):
atoms = []
for at, pos in latpos:
atoms.append(at)
distinct_at = _distinct_atoms(atoms)
lines = []
if soc:
lines.append("spinors = T")
lines.append("begin projections")
for at in distinct_at:
at_proj = _proj_line(at, valence[at])
lines.append(at_proj)
lines.append("end projections")
lines.append("")
return lines
def _proj_line(at, valence):
line = "{}: ".format(at)
for val_index, this_val in enumerate(valence):
if val_index != 0:
line += ";"
if this_val == "s":
line += "l=0"
elif this_val == "p":
line += "l=1"
elif this_val == "d":
line += "l=2"
else:
raise ValueError("unrecognized valence component")
return line
def _distinct_atoms(atoms):
distinct_at = []
for at_index, at in enumerate(atoms):
if at not in distinct_at:
distinct_at.append(at)
return distinct_at
def Spin(spin_polarized):
if not spin_polarized:
return [], None
else:
lines_up = ["spin = up", ""]
lines_down = ["spin = down", ""]
return lines_up, lines_down
def UnitCell(axes, alat, abohr):
lines = ["begin unit_cell_cart"]
lines.append("bohr")
for i in range(3):
ix = (alat / abohr) * axes[i][0]
iy = (alat / abohr) * axes[i][1]
iz = (alat / abohr) * axes[i][2]
lines.append(" {} {} {}".format(str(ix), str(iy), str(iz)))
lines.append("end unit_cell_cart")
lines.append("")
return lines
def AtomPos(latpos):
lines = ["begin atoms_frac"]
for atom, pos in latpos:
lines.append(" {} {} {} {}".format(atom, str(pos[0]), str(pos[1]), str(pos[2])))
lines.append("end atoms_frac")
lines.append("")
return lines
def Kpoints(Nk1, Nk2):
lines = ["mp_grid = {} {} 1".format(Nk1, Nk2)]
lines.append("")
lines.append("begin kpoints")
lines.extend(ks_strs(Nk1, Nk2))
lines.append("end kpoints")
lines.append("")
return lines
def ks_strs(Nk1, Nk2):
ks_lists = nscf_ks(Nk1, Nk2)
ret = []
for ks in ks_lists:
ret.append("{} {} {}".format(ks[0], ks[1], ks[2]))
return ret
def _get_num_wann(maxl):
num = 0
for c in maxl:
if c == "0":
num += 1
elif c == "1":
num += 4
elif c == "2":
num += 9
else:
raise ValueError("unexpected value in maxl")
return num
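# Winfile assembles a complete Wannier90 .win input for the given material: header
# (num_bands/num_wann), placeholder disentanglement window (patched later by
# Update_Disentanglement), projections, the unit cell converted to Bohr, fractional
# atomic positions and the Monkhorst-Pack k-point list.  It returns a pair of
# strings: (input, None) in the non-spin-polarized case, or separate spin-up and
# spin-down inputs otherwise.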
def Winfile(material):
spin_polarized = False # TODO - handle case with spin polarization on and soc off
soc = material["soc"]
if soc and spin_polarized:
raise ValueError("Cannot specify both spin_polarized and soc.")
nbnd = wannier_num_bands(material["valence"])
num_wann = material["valence"]["total"]
lines = Header(nbnd, num_wann, soc)
lines.extend(Disentanglement())
axes, latpos = cell.build(material["latconst"], material["latvecs"],
material["cartpos"], material["vacuum_dist"])
alat = material["latconst"]
abohr = 0.52917721
lines.extend(Projections(latpos, soc, material["valence"]))
lines.extend(UnitCell(axes, alat, abohr))
lines.extend(AtomPos(latpos))
Nk1, Nk2 = material["nscf_Nk1"], material["nscf_Nk2"]
lines.extend(Kpoints(Nk1, Nk2))
spin_up, spin_down = Spin(spin_polarized)
if not spin_polarized:
lines.extend(spin_up)
lines_str = "\n".join(lines) + "\n"
return lines_str, None
else:
lines_down = deepcopy(lines)
lines.extend(spin_up)
lines_down.extend(spin_down)
lines_str = "\n".join(lines) + "\n"
lines_down_str = "\n".join(lines_down) + "\n"
return lines_str, lines_down_str
|
|
# -*- coding: utf-8 -*-
from requests.auth import AuthBase
import requests
import json
import hashlib
import sys
import os
import getpass
from clint.textui import colored
from common import output
import netrc
STASH_HOST = 'http://getstash.herokuapp.com'
if 'STASH_HOST' in os.environ:
STASH_HOST = os.environ['STASH_HOST']
class DuplicateKeyword(Exception):
"""
Key already exist
"""
pass
class WrongArgumentsSet(Exception):
"""
Not enough arguments
"""
pass
class WrongKey(Exception):
"""
Key not found
"""
pass
class NoInternetConnection(Exception):
"""
No Internet connection or server not available
"""
pass
class ServerError(Exception):
"""
Server error
"""
pass
class UnknownServerError(Exception):
"""
Unknown server error
"""
pass
class WrongCredentials(Exception):
pass
class TokenAuth(AuthBase):
"""Attaches HTTP Token Authentication to the given Request object."""
def __init__(self, username, password):
# setup any auth-related data here
self.username = username
self.password = password
def __call__(self, r):
# modify and return the request
r.headers['X-Token'] = self.password
return r
class AlreadyLoggedIn(Exception):
pass
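# The API client below keeps its session in ~/.netrc, keyed by STASH_HOST: login()
# obtains a token from the server and appends a netrc entry, check_login() reads it
# back, and logout() removes it.  Every remote operation is a small method that
# only returns its JSON payload; send_request_decorator adds the user/token fields
# and POSTs the payload to STASH_HOST + '/api/json', while login_decorator refuses
# to run the call when no credentials are stored.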
class API(object):
username = None
token = None
def check_login(self):
"""
Check if user logged in. If True - return login and token, else returns None
"""
netrc_path = os.path.join(os.path.expanduser('~'), '.netrc')
if not os.path.exists(netrc_path):
open(netrc_path, 'w').close()
info = netrc.netrc()
login, account, password = info.authenticators(STASH_HOST) or (None, None, None)
if password and login:
if self.username is None or self.token is None:
self.username = login
# todo: why token is equal to password?
self.token = password
return login, password
return None
def login_decorator(fn):
def wrapper(*args, **kwargs):
if len(args) > 0 and isinstance(args[0], API):
if args[0].check_login() is not None:
return fn(*args, **kwargs)
            raise Exception('Unknown credentials.\nTry running "stash login" first.\n')
            #output('Unknown credentials.\nTry running "stash login" first.\n', color='yellow')
return wrapper
def send_request_decorator(fn):
"""
Request decorator (avoiding code duplication)
"""
def wrapper(self, *args):
data = fn(self, *args)
data.update(self.get_user_data())
url = STASH_HOST + '/api/json'
try:
data['token'] = self.token
headers = {'Stash-Token': self.token}
r = requests.post(url, data=json.dumps(data), headers=headers)
except requests.exceptions.ConnectionError:
raise NoInternetConnection
# todo: replace with regular python exceptions
if r.status_code == 404:
raise WrongKey
if r.status_code == 401:
raise WrongCredentials
if r.status_code == 500:
raise ServerError
if r.status_code == 200:
return r.json()
else:
                raise UnknownServerError
return wrapper
def get_user_data(self):
return {'user': self.username}
def login(self, login, password):
if self.check_login() is not None:
raise AlreadyLoggedIn
m = hashlib.new('md5')
m.update(password)
r = self.get_token(login, password)
#TODO check if r is an error (remove / from stash host for example)
if 'token' in r:
            # todo: maybe we don't need these two lines?
self.username = login
self.token = r['token']
with open(os.path.join(os.environ['HOME'], ".netrc"), "a") as f:
f.write("machine " + STASH_HOST + " login " + login + " password " + str(r['token']) + "\n")
f.close()
else:
# todo: do something
pass
if 'error' in r:
raise Exception(r['error'])
return True
def logout(self):
"""
Clear .netrc record
"""
netrc_path = os.path.join(os.path.expanduser('~'), '.netrc')
if not os.path.exists(netrc_path):
open(netrc_path, 'w').close()
info = netrc.netrc()
if STASH_HOST in info.hosts:
del info.hosts[STASH_HOST]
else:
raise Exception('You haven\'t logged in yet')
with open(netrc_path, 'w') as f:
f.write(info.__repr__())
f.close()
return True
# ==========
@send_request_decorator
@login_decorator
def get(self, key):
return {'get': key}
@send_request_decorator
@login_decorator
def search(self, key):
return {'search': key}
@send_request_decorator
@login_decorator
def set(self, key, value, tags, overwrite=False,append=False):
return {'set': { key: value }, 'tags' : tags, 'overwrite': overwrite, 'append' : append}
@send_request_decorator
@login_decorator
def delete(self, key):
return {'delete': key}
@send_request_decorator
@login_decorator
def all(self):
return {'getkeys': True}
@send_request_decorator
@login_decorator
def gettags(self):
return {'gettags': True}
@send_request_decorator
@login_decorator
def tags(self, key):
return {'tags': key }
@send_request_decorator
@login_decorator
def push(self, list_title, value):
return {'push': {list_title: value}}
@send_request_decorator
def get_token(self, username, password):
return {'login': {username: password}}
# =========
@login_decorator
@send_request_decorator
def sync(self, local_db_data):
return { 'sync' : local_db_data }
def push(self):
"""Push data to cloud"""
def pull(self):
"""Pull data from cloud"""
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Model classes that form the core of instances functionality."""
from datetime import datetime
from novaclient import exceptions as nova_exceptions
from trove.common import cfg
from trove.common import exception
import trove.common.instance as rd_instance
from trove.common.remote import create_dns_client
from trove.common.remote import create_guest_client
from trove.common.remote import create_nova_client
from trove.common.remote import create_cinder_client
from trove.common import utils
from trove.extensions.security_group.models import SecurityGroup
from trove.extensions.security_group.models import SecurityGroupRule
from trove.db import get_db_api
from trove.db import models as dbmodels
from trove.backup.models import Backup
from trove.quota.quota import run_with_quotas
from trove.instance.tasks import InstanceTask
from trove.instance.tasks import InstanceTasks
from trove.taskmanager import api as task_api
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def load_server(context, instance_id, server_id):
"""Loads a server or raises an exception."""
client = create_nova_client(context)
try:
server = client.servers.get(server_id)
except nova_exceptions.NotFound:
LOG.debug("Could not find nova server_id(%s)" % server_id)
raise exception.ComputeInstanceNotFound(instance_id=instance_id,
server_id=server_id)
except nova_exceptions.ClientException as e:
raise exception.TroveError(str(e))
return server
class InstanceStatus(object):
ACTIVE = "ACTIVE"
BLOCKED = "BLOCKED"
BUILD = "BUILD"
FAILED = "FAILED"
REBOOT = "REBOOT"
RESIZE = "RESIZE"
BACKUP = "BACKUP"
SHUTDOWN = "SHUTDOWN"
ERROR = "ERROR"
def validate_volume_size(size):
if size is None:
raise exception.VolumeSizeNotSpecified()
max_size = CONF.max_accepted_volume_size
if long(size) > max_size:
msg = ("Volume 'size' cannot exceed maximum "
"of %d Gb, %s cannot be accepted."
% (max_size, size))
raise exception.VolumeQuotaExceeded(msg)
def load_simple_instance_server_status(context, db_info):
"""Loads a server or raises an exception."""
if 'BUILDING' == db_info.task_status.action:
db_info.server_status = "BUILD"
db_info.addresses = {}
else:
client = create_nova_client(context)
try:
server = client.servers.get(db_info.compute_instance_id)
db_info.server_status = server.status
db_info.addresses = server.addresses
except nova_exceptions.NotFound:
db_info.server_status = "SHUTDOWN"
db_info.addresses = {}
# If the compute server is in any of these states we can't perform any
# actions (delete, resize, etc).
SERVER_INVALID_ACTION_STATUSES = ["BUILD", "REBOOT", "REBUILD", "RESIZE"]
# Statuses in which an instance can have an action performed.
VALID_ACTION_STATUSES = ["ACTIVE"]
# Invalid states to contact the agent
AGENT_INVALID_STATUSES = ["BUILD", "REBOOT", "RESIZE"]
class SimpleInstance(object):
"""A simple view of an instance.
    This gets loaded directly from the local database, so it's cheaper than
creating the fully loaded Instance.
"""
def __init__(self, context, db_info, service_status):
self.context = context
self.db_info = db_info
self.service_status = service_status
@property
def addresses(self):
#TODO(tim.simpson): Review whether we should keep this... its a mess.
if hasattr(self.db_info, 'addresses'):
return self.db_info.addresses
@property
def created(self):
return self.db_info.created
@property
def flavor_id(self):
# Flavor ID is a str in the 1.0 API.
return str(self.db_info.flavor_id)
@property
def hostname(self):
return self.db_info.hostname
@property
def id(self):
return self.db_info.id
@property
def tenant_id(self):
return self.db_info.tenant_id
@property
def is_building(self):
return self.status in [InstanceStatus.BUILD]
@property
def is_sql_running(self):
"""True if the service status indicates MySQL is up and running."""
return self.service_status.status in MYSQL_RESPONSIVE_STATUSES
@property
def name(self):
return self.db_info.name
@property
def server_id(self):
return self.db_info.compute_instance_id
@property
def status(self):
### Check for taskmanager errors.
if self.db_info.task_status.is_error:
return InstanceStatus.ERROR
### Check for taskmanager status.
ACTION = self.db_info.task_status.action
if 'BUILDING' == ACTION:
if 'ERROR' == self.db_info.server_status:
return InstanceStatus.ERROR
return InstanceStatus.BUILD
if 'REBOOTING' == ACTION:
return InstanceStatus.REBOOT
if 'RESIZING' == ACTION:
return InstanceStatus.RESIZE
### Check for server status.
if self.db_info.server_status in ["BUILD", "ERROR", "REBOOT",
"RESIZE"]:
return self.db_info.server_status
### Check if there is a backup running for this instance
if Backup.running(self.id):
return InstanceStatus.BACKUP
### Report as Shutdown while deleting, unless there's an error.
if 'DELETING' == ACTION:
if self.db_info.server_status in ["ACTIVE", "SHUTDOWN", "DELETED"]:
return InstanceStatus.SHUTDOWN
else:
LOG.error(_("While shutting down instance (%(instance)s): "
"server had status (%(status)s).") %
{'instance': self.id,
'status': self.db_info.server_status})
return InstanceStatus.ERROR
### Check against the service status.
# The service is only paused during a reboot.
if rd_instance.ServiceStatuses.PAUSED == self.service_status.status:
return InstanceStatus.REBOOT
# If the service status is NEW, then we are building.
if rd_instance.ServiceStatuses.NEW == self.service_status.status:
return InstanceStatus.BUILD
# For everything else we can look at the service status mapping.
return self.service_status.status.api_status
@property
def updated(self):
return self.db_info.updated
@property
def volume_id(self):
return self.db_info.volume_id
@property
def volume_size(self):
return self.db_info.volume_size
@property
def service_type(self):
return self.db_info.service_type
class DetailInstance(SimpleInstance):
"""A detailed view of an Instnace.
This loads a SimpleInstance and then adds additional data for the
instance from the guest.
"""
def __init__(self, context, db_info, service_status):
super(DetailInstance, self).__init__(context, db_info, service_status)
self._volume_used = None
@property
def volume_used(self):
return self._volume_used
@volume_used.setter
def volume_used(self, value):
self._volume_used = value
def get_db_info(context, id):
if context is None:
raise TypeError("Argument context not defined.")
elif id is None:
raise TypeError("Argument id not defined.")
try:
db_info = DBInstance.find_by(context=context, id=id, deleted=False)
except exception.NotFound:
raise exception.NotFound(uuid=id)
return db_info
def load_any_instance(context, id):
# Try to load an instance with a server.
# If that fails, try to load it without the server.
try:
return load_instance(BuiltInstance, context, id, needs_server=True)
except exception.UnprocessableEntity:
LOG.warn("Could not load instance %s." % id)
return load_instance(FreshInstance, context, id, needs_server=False)
def load_instance(cls, context, id, needs_server=False):
db_info = get_db_info(context, id)
if not needs_server:
# TODO(tim.simpson): When we have notifications this won't be
# necessary and instead we'll just use the server_status field from
# the instance table.
load_simple_instance_server_status(context, db_info)
server = None
else:
try:
server = load_server(context, db_info.id,
db_info.compute_instance_id)
#TODO(tim.simpson): Remove this hack when we have notifications!
db_info.server_status = server.status
db_info.addresses = server.addresses
except exception.ComputeInstanceNotFound:
LOG.error("COMPUTE ID = %s" % db_info.compute_instance_id)
raise exception.UnprocessableEntity("Instance %s is not ready." %
id)
service_status = InstanceServiceStatus.find_by(instance_id=id)
LOG.info("service status=%s" % service_status)
return cls(context, db_info, server, service_status)
def load_instance_with_guest(cls, context, id):
db_info = get_db_info(context, id)
load_simple_instance_server_status(context, db_info)
service_status = InstanceServiceStatus.find_by(instance_id=id)
LOG.info("service status=%s" % service_status)
instance = cls(context, db_info, service_status)
load_guest_info(instance, context, id)
return instance
def load_guest_info(instance, context, id):
if instance.status not in AGENT_INVALID_STATUSES:
guest = create_guest_client(context, id)
try:
volume_info = guest.get_volume_info()
instance.volume_used = volume_info['used']
except Exception as e:
LOG.error(e)
return instance
class BaseInstance(SimpleInstance):
"""Represents an instance."""
def __init__(self, context, db_info, server, service_status):
super(BaseInstance, self).__init__(context, db_info, service_status)
self.server = server
self._guest = None
self._nova_client = None
self._volume_client = None
def get_guest(self):
return create_guest_client(self.context, self.db_info.id)
def delete(self):
def _delete_resources():
if self.is_building:
raise exception.UnprocessableEntity("Instance %s is not ready."
% self.id)
LOG.debug(_(" ... deleting compute id = %s") %
self.db_info.compute_instance_id)
LOG.debug(_(" ... setting status to DELETING."))
self.update_db(task_status=InstanceTasks.DELETING)
task_api.API(self.context).delete_instance(self.id)
deltas = {'instances': -1}
if CONF.trove_volume_support:
deltas['volumes'] = -self.volume_size
return run_with_quotas(self.tenant_id,
deltas,
_delete_resources)
def _delete_resources(self, deleted_at):
pass
def delete_async(self):
deleted_at = datetime.utcnow()
self._delete_resources(deleted_at)
LOG.debug("Setting instance %s to deleted..." % self.id)
# Delete guest queue.
try:
guest = self.get_guest()
guest.delete_queue()
except Exception as ex:
LOG.warn(ex)
self.update_db(deleted=True, deleted_at=deleted_at,
task_status=InstanceTasks.NONE)
self.set_servicestatus_deleted()
# Delete associated security group
if CONF.trove_security_groups_support:
SecurityGroup.delete_for_instance(self.db_info.id,
self.context)
@property
def guest(self):
if not self._guest:
self._guest = self.get_guest()
return self._guest
@property
def nova_client(self):
if not self._nova_client:
self._nova_client = create_nova_client(self.context)
return self._nova_client
def update_db(self, **values):
self.db_info = DBInstance.find_by(id=self.id, deleted=False)
for key in values:
setattr(self.db_info, key, values[key])
self.db_info.save()
def set_servicestatus_deleted(self):
del_instance = InstanceServiceStatus.find_by(instance_id=self.id)
del_instance.set_status(rd_instance.ServiceStatuses.DELETED)
del_instance.save()
@property
def volume_client(self):
if not self._volume_client:
self._volume_client = create_cinder_client(self.context)
return self._volume_client
class FreshInstance(BaseInstance):
@classmethod
def load(cls, context, id):
return load_instance(cls, context, id, needs_server=False)
class BuiltInstance(BaseInstance):
@classmethod
def load(cls, context, id):
return load_instance(cls, context, id, needs_server=True)
class Instance(BuiltInstance):
"""Represents an instance.
The life span of this object should be limited. Do not store them or
pass them between threads.
"""
@classmethod
def create(cls, context, name, flavor_id, image_id,
databases, users, service_type, volume_size, backup_id,
availability_zone=None):
client = create_nova_client(context)
try:
flavor = client.flavors.get(flavor_id)
except nova_exceptions.NotFound:
raise exception.FlavorNotFound(uuid=flavor_id)
deltas = {'instances': 1}
if CONF.trove_volume_support:
validate_volume_size(volume_size)
deltas['volumes'] = volume_size
else:
if volume_size is not None:
raise exception.VolumeNotSupported()
ephemeral_support = CONF.device_path
if ephemeral_support and flavor.ephemeral == 0:
raise exception.LocalStorageNotSpecified(flavor=flavor_id)
def _create_resources():
security_groups = None
if backup_id is not None:
backup_info = Backup.get_by_id(context, backup_id)
if backup_info.is_running:
raise exception.BackupNotCompleteError(backup_id=backup_id)
location = backup_info.location
LOG.info(_("Checking if backup exist in '%s'") % location)
if not Backup.check_object_exist(context, location):
raise exception.BackupFileNotFound(location=location)
db_info = DBInstance.create(name=name, flavor_id=flavor_id,
tenant_id=context.tenant,
volume_size=volume_size,
service_type=service_type,
task_status=InstanceTasks.BUILDING)
LOG.debug(_("Tenant %(tenant)s created new "
"Trove instance %(db)s...") %
{'tenant': context.tenant, 'db': db_info.id})
service_status = InstanceServiceStatus.create(
instance_id=db_info.id,
status=rd_instance.ServiceStatuses.NEW)
if CONF.trove_dns_support:
dns_client = create_dns_client(context)
hostname = dns_client.determine_hostname(db_info.id)
db_info.hostname = hostname
db_info.save()
if CONF.trove_security_groups_support:
security_group = SecurityGroup.create_for_instance(
db_info.id,
context)
if CONF.trove_security_groups_rules_support:
SecurityGroupRule.create_sec_group_rule(
security_group,
CONF.trove_security_group_rule_protocol,
CONF.trove_security_group_rule_port,
CONF.trove_security_group_rule_port,
CONF.trove_security_group_rule_cidr,
context
)
security_groups = [security_group["name"]]
task_api.API(context).create_instance(db_info.id, name, flavor,
image_id, databases, users,
service_type, volume_size,
security_groups, backup_id,
availability_zone)
return SimpleInstance(context, db_info, service_status)
return run_with_quotas(context.tenant,
deltas,
_create_resources)
def resize_flavor(self, new_flavor_id):
self.validate_can_perform_action()
LOG.debug("resizing instance %s flavor to %s"
% (self.id, new_flavor_id))
# Validate that the flavor can be found and that it isn't the same size
# as the current one.
client = create_nova_client(self.context)
try:
new_flavor = client.flavors.get(new_flavor_id)
except nova_exceptions.NotFound:
raise exception.FlavorNotFound(uuid=new_flavor_id)
old_flavor = client.flavors.get(self.flavor_id)
new_flavor_size = new_flavor.ram
old_flavor_size = old_flavor.ram
if CONF.trove_volume_support:
if new_flavor.ephemeral != 0:
raise exception.LocalStorageNotSupported()
if new_flavor_size == old_flavor_size:
raise exception.CannotResizeToSameSize()
elif CONF.device_path is not None:
# ephemeral support enabled
if new_flavor.ephemeral == 0:
raise exception.LocalStorageNotSpecified(flavor=new_flavor_id)
if (new_flavor_size == old_flavor_size and
new_flavor.ephemeral == old_flavor.ephemeral):
raise exception.CannotResizeToSameSize()
# Set the task to RESIZING and begin the async call before returning.
self.update_db(task_status=InstanceTasks.RESIZING)
LOG.debug("Instance %s set to RESIZING." % self.id)
task_api.API(self.context).resize_flavor(self.id, old_flavor,
new_flavor)
def resize_volume(self, new_size):
def _resize_resources():
self.validate_can_perform_action()
LOG.info("Resizing volume of instance %s..." % self.id)
if not self.volume_size:
raise exception.BadRequest("Instance %s has no volume."
% self.id)
old_size = self.volume_size
if int(new_size) <= old_size:
msg = ("The new volume 'size' must be larger than the current "
"volume size of '%s'")
raise exception.BadRequest(msg % old_size)
# Set the task to Resizing before sending off to the taskmanager
self.update_db(task_status=InstanceTasks.RESIZING)
task_api.API(self.context).resize_volume(new_size, self.id)
new_size_l = long(new_size)
validate_volume_size(new_size_l)
return run_with_quotas(self.tenant_id,
{'volumes': new_size_l - self.volume_size},
_resize_resources)
def reboot(self):
self.validate_can_perform_action()
LOG.info("Rebooting instance %s..." % self.id)
self.update_db(task_status=InstanceTasks.REBOOTING)
task_api.API(self.context).reboot(self.id)
def restart(self):
self.validate_can_perform_action()
LOG.info("Restarting MySQL on instance %s..." % self.id)
# Set our local status since Nova might not change it quick enough.
#TODO(tim.simpson): Possible bad stuff can happen if this service
# shuts down before it can set status to NONE.
# We need a last updated time to mitigate this;
# after some period of tolerance, we'll assume the
# status is no longer in effect.
self.update_db(task_status=InstanceTasks.REBOOTING)
task_api.API(self.context).restart(self.id)
def migrate(self, host=None):
self.validate_can_perform_action()
LOG.info("Migrating instance id = %s, to host = %s" % (self.id, host))
self.update_db(task_status=InstanceTasks.MIGRATING)
task_api.API(self.context).migrate(self.id, host)
def reset_task_status(self):
LOG.info("Settting task status to NONE on instance %s..." % self.id)
self.update_db(task_status=InstanceTasks.NONE)
def validate_can_perform_action(self):
"""
Raises exception if an instance action cannot currently be performed.
"""
if self.db_info.server_status != 'ACTIVE':
status = self.db_info.server_status
elif self.db_info.task_status != InstanceTasks.NONE:
status = self.db_info.task_status
elif not self.service_status.status.action_is_allowed:
status = self.status
elif Backup.running(self.id):
status = InstanceStatus.BACKUP
else:
return
msg = ("Instance is not currently available for an action to be "
"performed (status was %s)." % status)
LOG.error(msg)
raise exception.UnprocessableEntity(msg)
def create_server_list_matcher(server_list):
# Returns a method which finds a server from the given list.
def find_server(instance_id, server_id):
matches = [server for server in server_list if server.id == server_id]
if len(matches) == 1:
return matches[0]
elif len(matches) < 1:
# The instance was not found in the list; this can happen if the
# instance was deleted from nova but still exists in the trove database.
raise exception.ComputeInstanceNotFound(
instance_id=instance_id, server_id=server_id)
else:
# Should never happen, but never say never.
LOG.error(_("Server %(server)s for instance %(instance)s was"
"found twice!") % {'server': server_id,
'instance': instance_id})
raise exception.TroveError(uuid=instance_id)
return find_server
class Instances(object):
DEFAULT_LIMIT = CONF.instances_page_size
@staticmethod
def load(context):
def load_simple_instance(context, db, status, **kwargs):
return SimpleInstance(context, db, status)
if context is None:
raise TypeError("Argument context not defined.")
client = create_nova_client(context)
servers = client.servers.list()
db_infos = DBInstance.find_all(tenant_id=context.tenant, deleted=False)
limit = int(context.limit or Instances.DEFAULT_LIMIT)
if limit > Instances.DEFAULT_LIMIT:
limit = Instances.DEFAULT_LIMIT
data_view = DBInstance.find_by_pagination('instances', db_infos, "foo",
limit=limit,
marker=context.marker)
next_marker = data_view.next_page_marker
find_server = create_server_list_matcher(servers)
for db in db_infos:
LOG.debug("checking for db [id=%s, compute_instance_id=%s]" %
(db.id, db.compute_instance_id))
ret = Instances._load_servers_status(load_simple_instance, context,
data_view.collection,
find_server)
return ret, next_marker
@staticmethod
def _load_servers_status(load_instance, context, db_items, find_server):
ret = []
for db in db_items:
server = None
try:
#TODO(tim.simpson): Delete when we get notifications working!
if InstanceTasks.BUILDING == db.task_status:
db.server_status = "BUILD"
else:
try:
server = find_server(db.id, db.compute_instance_id)
db.server_status = server.status
except exception.ComputeInstanceNotFound:
db.server_status = "SHUTDOWN" # Fake it...
#TODO(tim.simpson): End of hack.
#volumes = find_volumes(server.id)
status = InstanceServiceStatus.find_by(instance_id=db.id)
LOG.info(_("Server api_status(%s)") %
status.status.api_status)
if not status.status: # This should never happen.
LOG.error(_("Server status could not be read for "
"instance id(%s)") % db.id)
continue
except exception.ModelNotFoundError:
LOG.error(_("Server status could not be read for "
"instance id(%s)") % db.id)
continue
ret.append(load_instance(context, db, status, server=server))
return ret
class DBInstance(dbmodels.DatabaseModelBase):
"""Defines the task being executed plus the start time."""
#TODO(tim.simpson): Add start time.
_data_fields = ['name', 'created', 'compute_instance_id',
'task_id', 'task_description', 'task_start_time',
'volume_id', 'deleted', 'tenant_id', 'service_type']
def __init__(self, task_status, **kwargs):
kwargs["task_id"] = task_status.code
kwargs["task_description"] = task_status.db_text
kwargs["deleted"] = False
super(DBInstance, self).__init__(**kwargs)
self.set_task_status(task_status)
def _validate(self, errors):
if InstanceTask.from_code(self.task_id) is None:
errors['task_id'] = "Not valid."
if self.task_status is None:
errors['task_status'] = "Cannot be none."
def get_task_status(self):
return InstanceTask.from_code(self.task_id)
def set_task_status(self, value):
self.task_id = value.code
self.task_description = value.db_text
task_status = property(get_task_status, set_task_status)
class ServiceImage(dbmodels.DatabaseModelBase):
"""Defines the status of the service being run."""
_data_fields = ['service_name', 'image_id']
class InstanceServiceStatus(dbmodels.DatabaseModelBase):
_data_fields = ['instance_id', 'status_id', 'status_description',
'updated_at']
def __init__(self, status, **kwargs):
kwargs["status_id"] = status.code
kwargs["status_description"] = status.description
super(InstanceServiceStatus, self).__init__(**kwargs)
self.set_status(status)
def _validate(self, errors):
if self.status is None:
errors['status'] = "Cannot be none."
if rd_instance.ServiceStatus.from_code(self.status_id) is None:
errors['status_id'] = "Not valid."
def get_status(self):
return rd_instance.ServiceStatus.from_code(self.status_id)
def set_status(self, value):
self.status_id = value.code
self.status_description = value.description
def save(self):
self['updated_at'] = utils.utcnow()
return get_db_api().save(self)
status = property(get_status, set_status)
def persisted_models():
return {
'instance': DBInstance,
'service_image': ServiceImage,
'service_statuses': InstanceServiceStatus,
}
MYSQL_RESPONSIVE_STATUSES = [rd_instance.ServiceStatuses.RUNNING]
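# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical illustration of how the quota-wrapped models above
# are typically driven from the API layer; the names and values below are
# illustrative only, and `context` is assumed to carry a valid tenant.
def _example_create_then_delete(context):
    # Instance.create() reserves quota through run_with_quotas() and returns a
    # SimpleInstance whose task_status stays BUILDING until the taskmanager
    # finishes provisioning the compute server (and volume, when enabled).
    instance = Instance.create(context, name="db1", flavor_id="7",
                               image_id="image-1234", databases=[], users=[],
                               service_type="mysql", volume_size=2,
                               backup_id=None)
    # Deletion goes through BuiltInstance.load(), so the compute server must be
    # reachable; run_with_quotas() then releases -1 instance (and -volume_size
    # volumes when volume support is enabled).
    Instance.load(context, instance.id).delete()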
|
|
import textwrap
from io import StringIO
from itertools import chain
from typing import List, TextIO, Union
import isort.literal
from isort.settings import DEFAULT_CONFIG, Config
from . import output, parse
from .exceptions import FileSkipComment
from .format import format_natural, remove_whitespace
from .settings import FILE_SKIP_COMMENTS
CIMPORT_IDENTIFIERS = ("cimport ", "cimport*", "from.cimport")
IMPORT_START_IDENTIFIERS = ("from ", "from.import", "import ", "import*") + CIMPORT_IDENTIFIERS
COMMENT_INDICATORS = ('"""', "'''", "'", '"', "#")
CODE_SORT_COMMENTS = (
"# isort: list",
"# isort: dict",
"# isort: set",
"# isort: unique-list",
"# isort: tuple",
"# isort: unique-tuple",
"# isort: assignments",
)
def process(
input_stream: TextIO,
output_stream: TextIO,
extension: str = "py",
config: Config = DEFAULT_CONFIG,
) -> bool:
"""Parses stream identifying sections of contiguous imports and sorting them
Code with unsorted imports is read from the provided `input_stream`, sorted, and then
written to the specified `output_stream`.
- `input_stream`: Text stream with unsorted import sections.
- `output_stream`: Text stream to output sorted inputs into.
- `config`: Config settings to use when sorting imports.
- *Default*: `isort.settings.DEFAULT_CONFIG`.
- `extension`: The file extension or file extension rules that should be used.
- *Default*: `"py"`.
- *Choices*: `["py", "pyi", "pyx"]`.
Returns `True` if changes were needed (the sorted output differs from what was
provided in the `input_stream`), otherwise `False`.
"""
line_separator: str = config.line_ending
add_imports: List[str] = [format_natural(addition) for addition in config.add_imports]
import_section: str = ""
next_import_section: str = ""
next_cimports: bool = False
in_quote: str = ""
first_comment_index_start: int = -1
first_comment_index_end: int = -1
contains_imports: bool = False
in_top_comment: bool = False
first_import_section: bool = True
indent: str = ""
isort_off: bool = False
code_sorting: Union[bool, str] = False
code_sorting_section: str = ""
code_sorting_indent: str = ""
cimports: bool = False
made_changes: bool = False
stripped_line: str = ""
end_of_file: bool = False
verbose_output: List[str] = []
if config.float_to_top:
new_input = ""
current = ""
isort_off = False
for line in chain(input_stream, (None,)):
if isort_off and line is not None:
if line == "# isort: on\n":
isort_off = False
new_input += line
elif line in ("# isort: split\n", "# isort: off\n", None) or str(line).endswith(
"# isort: split\n"
):
if line == "# isort: off\n":
isort_off = True
if current:
if add_imports:
add_line_separator = line_separator or "\n"
current += add_line_separator + add_line_separator.join(add_imports)
add_imports = []
parsed = parse.file_contents(current, config=config)
verbose_output += parsed.verbose_output
extra_space = ""
while current and current[-1] == "\n":
extra_space += "\n"
current = current[:-1]
extra_space = extra_space.replace("\n", "", 1)
sorted_output = output.sorted_imports(
parsed, config, extension, import_type="import"
)
made_changes = made_changes or _has_changed(
before=current,
after=sorted_output,
line_separator=parsed.line_separator,
ignore_whitespace=config.ignore_whitespace,
)
new_input += sorted_output
new_input += extra_space
current = ""
new_input += line or ""
else:
current += line or ""
input_stream = StringIO(new_input)
for index, line in enumerate(chain(input_stream, (None,))):
if line is None:
if index == 0 and not config.force_adds:
return False
not_imports = True
end_of_file = True
line = ""
if not line_separator:
line_separator = "\n"
if code_sorting and code_sorting_section:
sorted_code = textwrap.indent(
isort.literal.assignment(
code_sorting_section,
str(code_sorting),
extension,
config=_indented_config(config, indent),
),
code_sorting_indent,
)
made_changes = made_changes or _has_changed(
before=code_sorting_section,
after=sorted_code,
line_separator=line_separator,
ignore_whitespace=config.ignore_whitespace,
)
output_stream.write(sorted_code)
else:
stripped_line = line.strip()
if stripped_line and not line_separator:
line_separator = line[len(line.rstrip()) :].replace(" ", "").replace("\t", "")
for file_skip_comment in FILE_SKIP_COMMENTS:
if file_skip_comment in line:
raise FileSkipComment("Passed in content")
if not in_quote and stripped_line == "# isort: off":
isort_off = True
if (
(index == 0 or (index in (1, 2) and not contains_imports))
and stripped_line.startswith("#")
and stripped_line not in config.section_comments
):
in_top_comment = True
elif in_top_comment and (
not line.startswith("#") or stripped_line in config.section_comments
):
in_top_comment = False
first_comment_index_end = index - 1
was_in_quote = bool(in_quote)
if (not stripped_line.startswith("#") or in_quote) and '"' in line or "'" in line:
char_index = 0
if first_comment_index_start == -1 and (
line.startswith('"') or line.startswith("'")
):
first_comment_index_start = index
while char_index < len(line):
if line[char_index] == "\\":
char_index += 1
elif in_quote:
if line[char_index : char_index + len(in_quote)] == in_quote:
in_quote = ""
if first_comment_index_end < first_comment_index_start:
first_comment_index_end = index
elif line[char_index] in ("'", '"'):
long_quote = line[char_index : char_index + 3]
if long_quote in ('"""', "'''"):
in_quote = long_quote
char_index += 2
else:
in_quote = line[char_index]
elif line[char_index] == "#":
break
char_index += 1
not_imports = bool(in_quote) or was_in_quote or in_top_comment or isort_off
if not (in_quote or was_in_quote or in_top_comment):
if isort_off:
if stripped_line == "# isort: on":
isort_off = False
elif stripped_line.endswith("# isort: split"):
not_imports = True
elif stripped_line in CODE_SORT_COMMENTS:
code_sorting = stripped_line.split("isort: ")[1].strip()
code_sorting_indent = line[: -len(line.lstrip())]
not_imports = True
elif code_sorting:
if not stripped_line:
sorted_code = textwrap.indent(
isort.literal.assignment(
code_sorting_section,
str(code_sorting),
extension,
config=_indented_config(config, indent),
),
code_sorting_indent,
)
made_changes = made_changes or _has_changed(
before=code_sorting_section,
after=sorted_code,
line_separator=line_separator,
ignore_whitespace=config.ignore_whitespace,
)
output_stream.write(sorted_code)
not_imports = True
code_sorting = False
code_sorting_section = ""
code_sorting_indent = ""
else:
code_sorting_section += line
line = ""
elif stripped_line in config.section_comments:
if import_section and not contains_imports:
output_stream.write(import_section)
import_section = line
not_imports = False
else:
import_section += line
indent = line[: -len(line.lstrip())]
elif not (stripped_line or contains_imports):
not_imports = True
elif (
not stripped_line
or stripped_line.startswith("#")
and (not indent or indent + line.lstrip() == line)
and not config.treat_all_comments_as_code
and stripped_line not in config.treat_comments_as_code
):
import_section += line
elif stripped_line.startswith(IMPORT_START_IDENTIFIERS):
new_indent = line[: -len(line.lstrip())]
import_statement = line
stripped_line = line.strip().split("#")[0]
while stripped_line.endswith("\\") or (
"(" in stripped_line and ")" not in stripped_line
):
if stripped_line.endswith("\\"):
while stripped_line and stripped_line.endswith("\\"):
line = input_stream.readline()
stripped_line = line.strip().split("#")[0]
import_statement += line
else:
while ")" not in stripped_line:
line = input_stream.readline()
stripped_line = line.strip().split("#")[0]
import_statement += line
if (
import_statement.lstrip().startswith("from")
and "import" not in import_statement
):
line = import_statement
not_imports = True
else:
did_contain_imports = contains_imports
contains_imports = True
cimport_statement: bool = False
if (
import_statement.lstrip().startswith(CIMPORT_IDENTIFIERS)
or " cimport " in import_statement
or " cimport*" in import_statement
or " cimport(" in import_statement
or ".cimport" in import_statement
):
cimport_statement = True
if cimport_statement != cimports or (
new_indent != indent
and import_section
and (not did_contain_imports or len(new_indent) < len(indent))
):
indent = new_indent
if import_section:
next_cimports = cimport_statement
next_import_section = import_statement
import_statement = ""
not_imports = True
line = ""
else:
cimports = cimport_statement
else:
if new_indent != indent:
if import_section and did_contain_imports:
import_statement = indent + import_statement.lstrip()
else:
indent = new_indent
import_section += import_statement
else:
not_imports = True
if not_imports:
raw_import_section: str = import_section
if (
add_imports
and (stripped_line or end_of_file)
and not config.append_only
and not in_top_comment
and not in_quote
and not import_section
and not line.lstrip().startswith(COMMENT_INDICATORS)
):
import_section = line_separator.join(add_imports) + line_separator
if end_of_file and index != 0:
output_stream.write(line_separator)
contains_imports = True
add_imports = []
if next_import_section and not import_section: # pragma: no cover
raw_import_section = import_section = next_import_section
next_import_section = ""
if import_section:
if add_imports and not indent:
import_section = (
line_separator.join(add_imports) + line_separator + import_section
)
contains_imports = True
add_imports = []
if not indent:
import_section += line
raw_import_section += line
if not contains_imports:
output_stream.write(import_section)
else:
leading_whitespace = import_section[: -len(import_section.lstrip())]
trailing_whitespace = import_section[len(import_section.rstrip()) :]
if first_import_section and not import_section.lstrip(
line_separator
).startswith(COMMENT_INDICATORS):
import_section = import_section.lstrip(line_separator)
raw_import_section = raw_import_section.lstrip(line_separator)
first_import_section = False
if indent:
import_section = "".join(
line[len(indent) :] for line in import_section.splitlines(keepends=True)
)
parsed_content = parse.file_contents(import_section, config=config)
verbose_output += parsed_content.verbose_output
sorted_import_section = output.sorted_imports(
parsed_content,
_indented_config(config, indent),
extension,
import_type="cimport" if cimports else "import",
)
if not (import_section.strip() and not sorted_import_section):
if indent:
sorted_import_section = (
leading_whitespace
+ textwrap.indent(sorted_import_section, indent).strip()
+ trailing_whitespace
)
made_changes = made_changes or _has_changed(
before=raw_import_section,
after=sorted_import_section,
line_separator=line_separator,
ignore_whitespace=config.ignore_whitespace,
)
output_stream.write(sorted_import_section)
if not line and not indent and next_import_section:
output_stream.write(line_separator)
if indent:
output_stream.write(line)
if not next_import_section:
indent = ""
if next_import_section:
cimports = next_cimports
contains_imports = True
else:
contains_imports = False
import_section = next_import_section
next_import_section = ""
else:
output_stream.write(line)
not_imports = False
if stripped_line and not in_quote and not import_section and not next_import_section:
if stripped_line == "yield":
while not stripped_line or stripped_line == "yield":
new_line = input_stream.readline()
if not new_line:
break
output_stream.write(new_line)
stripped_line = new_line.strip().split("#")[0]
if stripped_line.startswith("raise") or stripped_line.startswith("yield"):
while stripped_line.endswith("\\"):
new_line = input_stream.readline()
if not new_line:
break
output_stream.write(new_line)
stripped_line = new_line.strip().split("#")[0]
if made_changes and config.only_modified:
for output_str in verbose_output:
print(output_str)
return made_changes
def _indented_config(config: Config, indent: str):
if not indent:
return config
return Config(
config=config,
line_length=max(config.line_length - len(indent), 0),
wrap_length=max(config.wrap_length - len(indent), 0),
lines_after_imports=1,
import_headings=config.import_headings if config.indented_import_headings else {},
)
def _has_changed(before: str, after: str, line_separator: str, ignore_whitespace: bool) -> bool:
if ignore_whitespace:
return (
remove_whitespace(before, line_separator=line_separator).strip()
!= remove_whitespace(after, line_separator=line_separator).strip()
)
return before.strip() != after.strip()
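# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical illustration of driving process() directly with
# in-memory streams; real callers normally reach it through isort's public API.
#
#     from io import StringIO
#     unsorted = StringIO("import sys\nimport os\n\nprint(os.sep, sys.path)\n")
#     cleaned = StringIO()
#     changed = process(unsorted, cleaned, extension="py", config=DEFAULT_CONFIG)
#     # `changed` is True because the two imports were reordered; the sorted
#     # source is available from cleaned.getvalue().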
|
|
"""Tests for gree component."""
from datetime import timedelta
from greeclimate.device import HorizontalSwing, VerticalSwing
from greeclimate.exceptions import DeviceNotBoundError, DeviceTimeoutError
import pytest
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
ATTR_HVAC_MODE,
ATTR_PRESET_MODE,
ATTR_SWING_MODE,
DOMAIN,
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
PRESET_SLEEP,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_SWING_MODE,
SERVICE_SET_TEMPERATURE,
SWING_BOTH,
SWING_HORIZONTAL,
SWING_OFF,
SWING_VERTICAL,
)
from homeassistant.components.gree.climate import (
FAN_MODES_REVERSE,
HVAC_MODES_REVERSE,
SUPPORTED_FEATURES,
)
from homeassistant.components.gree.const import (
DOMAIN as GREE_DOMAIN,
FAN_MEDIUM_HIGH,
FAN_MEDIUM_LOW,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .common import build_device_mock
from tests.async_mock import DEFAULT as DEFAULT_MOCK, AsyncMock, patch
from tests.common import MockConfigEntry, async_fire_time_changed
ENTITY_ID = f"{DOMAIN}.fake_device_1"
@pytest.fixture
def mock_now():
"""Fixture for dtutil.now."""
return dt_util.utcnow()
async def async_setup_gree(hass):
"""Set up the gree platform."""
MockConfigEntry(domain=GREE_DOMAIN).add_to_hass(hass)
await async_setup_component(hass, GREE_DOMAIN, {GREE_DOMAIN: {"climate": {}}})
await hass.async_block_till_done()
async def test_discovery_called_once(hass, discovery, device):
"""Test discovery is only ever called once."""
await async_setup_gree(hass)
assert discovery.call_count == 1
await async_setup_gree(hass)
assert discovery.call_count == 1
async def test_discovery_setup(hass, discovery, device):
"""Test setup of platform."""
MockDevice1 = build_device_mock(
name="fake-device-1", ipAddress="1.1.1.1", mac="aabbcc112233"
)
MockDevice2 = build_device_mock(
name="fake-device-2", ipAddress="2.2.2.2", mac="bbccdd223344"
)
discovery.return_value = [MockDevice1.device_info, MockDevice2.device_info]
device.side_effect = [MockDevice1, MockDevice2]
await async_setup_gree(hass)
await hass.async_block_till_done()
assert discovery.call_count == 1
assert len(hass.states.async_all(DOMAIN)) == 2
async def test_discovery_setup_connection_error(hass, discovery, device):
"""Test gree integration is setup."""
MockDevice1 = build_device_mock(name="fake-device-1")
MockDevice1.bind = AsyncMock(side_effect=DeviceNotBoundError)
MockDevice2 = build_device_mock(name="fake-device-2")
MockDevice2.bind = AsyncMock(side_effect=DeviceNotBoundError)
device.side_effect = [MockDevice1, MockDevice2]
await async_setup_gree(hass)
await hass.async_block_till_done()
assert discovery.call_count == 1
assert not hass.states.async_all(DOMAIN)
async def test_update_connection_failure(hass, discovery, device, mock_now):
"""Testing update hvac connection failure exception."""
device().update_state.side_effect = [
DEFAULT_MOCK,
DeviceTimeoutError,
DeviceTimeoutError,
]
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
# First update to make the device available
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state != STATE_UNAVAILABLE
next_update = mock_now + timedelta(minutes=10)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
next_update = mock_now + timedelta(minutes=15)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
# Then two more update failures to make the device unavailable
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state == STATE_UNAVAILABLE
async def test_update_connection_failure_recovery(hass, discovery, device, mock_now):
"""Testing update hvac connection failure recovery."""
device().update_state.side_effect = [DeviceTimeoutError, DEFAULT_MOCK]
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state == STATE_UNAVAILABLE
next_update = mock_now + timedelta(minutes=10)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state != STATE_UNAVAILABLE
async def test_update_unhandled_exception(hass, discovery, device, mock_now):
"""Testing update hvac connection unhandled response exception."""
device().update_state.side_effect = [DEFAULT_MOCK, Exception]
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state != STATE_UNAVAILABLE
next_update = mock_now + timedelta(minutes=10)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state == STATE_UNAVAILABLE
async def test_send_command_device_timeout(hass, discovery, device, mock_now):
"""Test for sending power on command to the device with a device timeout."""
await async_setup_gree(hass)
# First update to make the device available
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state != STATE_UNAVAILABLE
device().update_state.side_effect = DeviceTimeoutError
device().push_state_update.side_effect = DeviceTimeoutError
# Second update to make an initial error (device is still available)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.name == "fake-device-1"
assert state.state != STATE_UNAVAILABLE
# A service call while the device is still timing out should make it unavailable
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == STATE_UNAVAILABLE
async def test_send_command_device_unknown_error(hass, discovery, device, mock_now):
"""Test for sending power on command to the device with a device timeout."""
device().update_state.side_effect = [DEFAULT_MOCK, Exception]
device().push_state_update.side_effect = Exception
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
# First update to make the device available
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state != STATE_UNAVAILABLE
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == STATE_UNAVAILABLE
async def test_send_power_on(hass, discovery, device, mock_now):
"""Test for sending power on command to the device."""
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == HVAC_MODE_AUTO
async def test_send_power_on_device_timeout(hass, discovery, device, mock_now):
"""Test for sending power on command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == HVAC_MODE_AUTO
async def test_send_target_temperature(hass, discovery, device, mock_now):
"""Test for sending target temperature command to the device."""
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_TEMPERATURE: 25.1},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_TEMPERATURE) == 25
async def test_send_target_temperature_device_timeout(
hass, discovery, device, mock_now
):
"""Test for sending target temperature command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_TEMPERATURE: 25.1},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_TEMPERATURE) == 25
async def test_update_target_temperature(hass, discovery, device, mock_now):
"""Test for updating target temperature from the device."""
device().target_temperature = 32
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_TEMPERATURE) == 32
@pytest.mark.parametrize(
"preset", (PRESET_AWAY, PRESET_ECO, PRESET_SLEEP, PRESET_BOOST, PRESET_NONE)
)
async def test_send_preset_mode(hass, discovery, device, mock_now, preset):
"""Test for sending preset mode command to the device."""
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: preset},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_PRESET_MODE) == preset
async def test_send_invalid_preset_mode(hass, discovery, device, mock_now):
"""Test for sending preset mode command to the device."""
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
with pytest.raises(ValueError):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: "invalid"},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_PRESET_MODE) != "invalid"
@pytest.mark.parametrize(
"preset", (PRESET_AWAY, PRESET_ECO, PRESET_SLEEP, PRESET_BOOST, PRESET_NONE)
)
async def test_send_preset_mode_device_timeout(
hass, discovery, device, mock_now, preset
):
"""Test for sending preset mode command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: preset},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_PRESET_MODE) == preset
@pytest.mark.parametrize(
"preset", (PRESET_AWAY, PRESET_ECO, PRESET_SLEEP, PRESET_BOOST, PRESET_NONE)
)
async def test_update_preset_mode(hass, discovery, device, mock_now, preset):
"""Test for updating preset mode from the device."""
device().steady_heat = preset == PRESET_AWAY
device().power_save = preset == PRESET_ECO
device().sleep = preset == PRESET_SLEEP
device().turbo = preset == PRESET_BOOST
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_PRESET_MODE) == preset
@pytest.mark.parametrize(
"hvac_mode",
(
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
),
)
async def test_send_hvac_mode(hass, discovery, device, mock_now, hvac_mode):
"""Test for sending hvac mode command to the device."""
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: hvac_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == hvac_mode
@pytest.mark.parametrize(
"hvac_mode",
(HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT),
)
async def test_send_hvac_mode_device_timeout(
hass, discovery, device, mock_now, hvac_mode
):
"""Test for sending hvac mode command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: hvac_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == hvac_mode
@pytest.mark.parametrize(
"hvac_mode",
(
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
),
)
async def test_update_hvac_mode(hass, discovery, device, mock_now, hvac_mode):
"""Test for updating hvac mode from the device."""
device().power = hvac_mode != HVAC_MODE_OFF
device().mode = HVAC_MODES_REVERSE.get(hvac_mode)
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == hvac_mode
@pytest.mark.parametrize(
"fan_mode",
(FAN_AUTO, FAN_LOW, FAN_MEDIUM_LOW, FAN_MEDIUM, FAN_MEDIUM_HIGH, FAN_HIGH),
)
async def test_send_fan_mode(hass, discovery, device, mock_now, fan_mode):
"""Test for sending fan mode command to the device."""
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_FAN_MODE: fan_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_FAN_MODE) == fan_mode
async def test_send_invalid_fan_mode(hass, discovery, device, mock_now):
"""Test for sending fan mode command to the device."""
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
with pytest.raises(ValueError):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_FAN_MODE: "invalid"},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_FAN_MODE) != "invalid"
@pytest.mark.parametrize(
"fan_mode",
(FAN_AUTO, FAN_LOW, FAN_MEDIUM_LOW, FAN_MEDIUM, FAN_MEDIUM_HIGH, FAN_HIGH),
)
async def test_send_fan_mode_device_timeout(
hass, discovery, device, mock_now, fan_mode
):
"""Test for sending fan mode command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_FAN_MODE: fan_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_FAN_MODE) == fan_mode
@pytest.mark.parametrize(
"fan_mode",
(FAN_AUTO, FAN_LOW, FAN_MEDIUM_LOW, FAN_MEDIUM, FAN_MEDIUM_HIGH, FAN_HIGH),
)
async def test_update_fan_mode(hass, discovery, device, mock_now, fan_mode):
"""Test for updating fan mode from the device."""
device().fan_speed = FAN_MODES_REVERSE.get(fan_mode)
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_FAN_MODE) == fan_mode
@pytest.mark.parametrize(
"swing_mode", (SWING_OFF, SWING_BOTH, SWING_VERTICAL, SWING_HORIZONTAL)
)
async def test_send_swing_mode(hass, discovery, device, mock_now, swing_mode):
"""Test for sending swing mode command to the device."""
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_SWING_MODE: swing_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_SWING_MODE) == swing_mode
async def test_send_invalid_swing_mode(hass, discovery, device, mock_now):
"""Test for sending swing mode command to the device."""
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
with pytest.raises(ValueError):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_SWING_MODE: "invalid"},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_SWING_MODE) != "invalid"
@pytest.mark.parametrize(
"swing_mode", (SWING_OFF, SWING_BOTH, SWING_VERTICAL, SWING_HORIZONTAL)
)
async def test_send_swing_mode_device_timeout(
hass, discovery, device, mock_now, swing_mode
):
"""Test for sending swing mode command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_SWING_MODE: swing_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_SWING_MODE) == swing_mode
@pytest.mark.parametrize(
"swing_mode", (SWING_OFF, SWING_BOTH, SWING_VERTICAL, SWING_HORIZONTAL)
)
async def test_update_swing_mode(hass, discovery, device, mock_now, swing_mode):
"""Test for updating swing mode from the device."""
device().horizontal_swing = (
HorizontalSwing.FullSwing
if swing_mode in (SWING_BOTH, SWING_HORIZONTAL)
else HorizontalSwing.Default
)
device().vertical_swing = (
VerticalSwing.FullSwing
if swing_mode in (SWING_BOTH, SWING_VERTICAL)
else VerticalSwing.Default
)
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_SWING_MODE) == swing_mode
async def test_name(hass, discovery, device):
"""Test for name property."""
await async_setup_gree(hass)
state = hass.states.get(ENTITY_ID)
assert state.attributes[ATTR_FRIENDLY_NAME] == "fake-device-1"
async def test_supported_features_with_turnon(hass, discovery, device):
"""Test for supported_features property."""
await async_setup_gree(hass)
state = hass.states.get(ENTITY_ID)
assert state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORTED_FEATURES
|
|
import datetime
import os
import re
import time
import math
import django.utils.copycompat as copy
try:
import decimal
except ImportError:
from django.utils import _decimal as decimal # for Python 2.3
from django.db import connection
from django.db.models import signals
from django.db.models.query_utils import QueryWrapper
from django.dispatch import dispatcher
from django.conf import settings
from django import forms
from django.core import exceptions
from django.utils.datastructures import DictWrapper
from django.utils.functional import curry
from django.utils.itercompat import tee
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy, ugettext as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils import datetime_safe
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
class Field(object):
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
# Generic field type description, usually overridden by subclasses
def _description(self):
return _(u'Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if self.empty_strings_allowed and connection.features.interprets_empty_strings_as_nulls:
self.null = True
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = unique_for_date, unique_for_month
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
def __cmp__(self, other):
# This is needed because bisect does not take a comparison function.
return cmp(self.creation_counter, other.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
memodict[id(self)] = obj
return obj
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def db_type(self):
"""
Returns the database column data type for this field, taking into
account the DATABASE_ENGINE setting.
"""
# The default implementation of this method looks at the
# backend-specific DATA_TYPES dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# an XMLField is represented by a TEXT column type, which is the same
# as the TextField Django field type, which means XMLField's
# get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.creation.data_types[self.get_internal_type()] % data
except KeyError:
return None
def unique(self):
return self._unique or self.primary_key
unique = property(unique)
def set_attributes_from_name(self, name):
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and name:
self.verbose_name = name.replace('_', ' ')
def contribute_to_class(self, cls, name):
self.set_attributes_from_name(name)
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name, curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
return getattr(model_instance, self.attname)
def get_db_prep_value(self, value):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save`` and
``get_db_prep_lookup``.
"""
return value
def get_db_prep_save(self, value):
"Returns field's value prepared for saving into a database."
return self.get_db_prep_value(value)
def get_db_prep_lookup(self, lookup_type, value):
"Returns field's value prepared for database lookup."
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql()
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day', 'search'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
try:
value = int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer argument")
if self.get_internal_type() == 'DateField':
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return connection.ops.year_lookup_bounds(value)
raise TypeError("Field has invalid lookup: %s" % lookup_type)
def has_default(self):
"Returns a boolean of whether this field has a default value."
return self.default is not NOT_PROVIDED
def get_default(self):
"Returns the default value for this field."
if self.has_default():
if callable(self.default):
return self.default()
return force_unicode(self.default, strings_only=True)
if not self.empty_strings_allowed or (self.null and not connection.features.interprets_empty_strings_as_nulls):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
first_choice = include_blank and blank_choice or []
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"Returns flattened choices with a default blank choice included."
first_choice = include_blank and blank_choice or []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_unicode(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if hasattr(self._choices, 'next'):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=forms.CharField, **kwargs):
"Returns a django.forms.Field instance for this database Field."
defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = self.blank or not (self.has_default() or 'initial' in kwargs)
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in kwargs.keys():
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname)
class AutoField(Field):
description = ugettext_lazy("Integer")
empty_strings_allowed = False
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, "%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
_("This value must be an integer."))
def get_db_prep_value(self, value):
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, "A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
description = ugettext_lazy("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
if 'default' not in kwargs and not kwargs.get('null'):
kwargs['default'] = False
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False): return value
if value in ('t', 'True', '1'): return True
if value in ('f', 'False', '0'): return False
raise exceptions.ValidationError(
_("This value must be either True or False."))
def get_db_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_db_prep_lookup(lookup_type, value)
def get_db_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = self.null or not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = ugettext_lazy("String (up to %(max_length)s)")
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, basestring):
return value
if value is None:
if self.null:
return value
else:
raise exceptions.ValidationError(
ugettext_lazy("This field cannot be null."))
return smart_unicode(value)
def get_db_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
description = ugettext_lazy("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'form_class': forms.RegexField,
'regex': '^[\d,]+$',
'max_length': self.max_length,
'error_messages': {
'invalid': _(u'Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
ansi_date_re = re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$')
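# Loosely matches ISO-style dates such as 2008-01-31; actual calendar validity
# is checked via datetime.date() in DateField.to_python below.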
class DateField(Field):
description = ugettext_lazy("Date (without time)")
empty_strings_allowed = False
def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
# HACK: auto_now_add/auto_now should be done as a default or a pre_save.
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
if not ansi_date_re.search(value):
raise exceptions.ValidationError(
_('Enter a valid date in YYYY-MM-DD format.'))
# Now that we have the date string in YYYY-MM-DD format, check to make
# sure it's a valid date.
# We could use time.strptime here and catch errors, but datetime.date
# produces much friendlier error messages.
year, month, day = map(int, value.split('-'))
try:
return datetime.date(year, month, day)
except ValueError, e:
msg = _('Invalid date: %s') % _(str(e))
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField,self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=False))
def get_db_prep_lookup(self, lookup_type, value):
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day'):
return [int(value)]
return super(DateField, self).get_db_prep_lookup(lookup_type, value)
def get_db_prep_value(self, value):
# Casts dates into the format expected by the backend
return connection.ops.value_to_db_date(self.to_python(value))
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
if val is None:
data = ''
else:
data = datetime_safe.new_date(val).strftime("%Y-%m-%d")
return data
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
description = ugettext_lazy("Date (with time)")
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
# Attempt to parse a datetime:
value = smart_str(value)
# split usecs, because they are not recognized by strptime.
if '.' in value:
try:
value, usecs = value.split('.')
usecs = int(usecs)
except ValueError:
raise exceptions.ValidationError(
_('Enter a valid date/time in YYYY-MM-DD HH:MM[:ss[.uuuuuu]] format.'))
else:
usecs = 0
kwargs = {'microsecond': usecs}
try: # Seconds are optional, so try converting seconds first.
return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6],
**kwargs)
except ValueError:
try: # Try without seconds.
return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M')[:5],
**kwargs)
except ValueError: # Try without hour/minutes/seconds.
try:
return datetime.datetime(*time.strptime(value, '%Y-%m-%d')[:3],
**kwargs)
except ValueError:
raise exceptions.ValidationError(
_('Enter a valid date/time in YYYY-MM-DD HH:MM[:ss[.uuuuuu]] format.'))
def get_db_prep_value(self, value):
# Casts dates into the format expected by the backend
return connection.ops.value_to_db_datetime(self.to_python(value))
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
if val is None:
data = ''
else:
d = datetime_safe.new_datetime(val)
data = d.strftime('%Y-%m-%d %H:%M:%S')
return data
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
description = ugettext_lazy("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
_("This value must be a decimal number."))
def _format(self, value):
if isinstance(value, basestring) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_db_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
description = ugettext_lazy("E-mail address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
def formfield(self, **kwargs):
defaults = {'form_class': forms.EmailField}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = ugettext_lazy("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
description = ugettext_lazy("Floating point number")
def get_db_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
_("This value must be a float."))
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
description = ugettext_lazy("Integer")
def get_db_prep_value(self, value):
if value is None:
return None
return int(value)
def get_db_prep_lookup(self, lookup_type, value):
if (lookup_type == 'gte' or lookup_type == 'lt') \
and isinstance(value, float):
value = math.ceil(value)
return super(IntegerField, self).get_db_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
_("This value must be an integer."))
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = ugettext_lazy("IP address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
description = ugettext_lazy("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value in (None, True, False): return value
if value in ('None',): return None
if value in ('t', 'True', '1'): return True
if value in ('f', 'False', '0'): return False
raise exceptions.ValidationError(
_("This value must be either None, True or False."))
def get_db_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_db_prep_lookup(lookup_type, value)
def get_db_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = ugettext_lazy("Integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = ugettext_lazy("Integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
description = ugettext_lazy("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = ugettext_lazy("Integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = ugettext_lazy("Text")
def get_internal_type(self):
return "TextField"
def get_db_prep_value(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
description = ugettext_lazy("Time")
empty_strings_allowed = False
def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
# Attempt to parse a datetime:
value = smart_str(value)
# split usecs, because they are not recognized by strptime.
if '.' in value:
try:
value, usecs = value.split('.')
usecs = int(usecs)
except ValueError:
raise exceptions.ValidationError(
_('Enter a valid time in HH:MM[:ss[.uuuuuu]] format.'))
else:
usecs = 0
kwargs = {'microsecond': usecs}
try: # Seconds are optional, so try converting seconds first.
return datetime.time(*time.strptime(value, '%H:%M:%S')[3:6],
**kwargs)
except ValueError:
try: # Try without seconds.
return datetime.time(*time.strptime(value, '%H:%M')[3:5],
**kwargs)
except ValueError:
raise exceptions.ValidationError(
_('Enter a valid time in HH:MM[:ss[.uuuuuu]] format.'))
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_db_prep_value(self, value):
# Casts times into the format expected by the backend
return connection.ops.value_to_db_time(self.to_python(value))
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
if val is None:
data = ''
else:
data = val.strftime("%H:%M:%S")
return data
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
description = ugettext_lazy("URL")
def __init__(self, verbose_name=None, name=None, verify_exists=True, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
self.verify_exists = verify_exists
CharField.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {'form_class': forms.URLField, 'verify_exists': self.verify_exists}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class XMLField(TextField):
description = ugettext_lazy("XML text")
def __init__(self, verbose_name=None, name=None, schema_path=None, **kwargs):
self.schema_path = schema_path
Field.__init__(self, verbose_name, name, **kwargs)
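# Illustrative sketch (not part of the original module): a model definition
# using a few of the field classes above, with hypothetical model/field names.
#
#   from django.db import models
#
#   class Article(models.Model):
#       title = models.CharField(max_length=100)
#       slug = models.SlugField()
#       published = models.DateTimeField(auto_now_add=True)
#       rating = models.DecimalField(max_digits=4, decimal_places=2, null=True)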
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from oslo.serialization import jsonutils
import webob
from nova.api.openstack.compute.contrib import keypairs as keypairs_v2
from nova.api.openstack.compute.plugins.v3 import keypairs as keypairs_v21
from nova.api.openstack import wsgi
from nova import db
from nova import exception
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.objects import test_keypair
QUOTAS = quota.QUOTAS
keypair_data = {
'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
}
def fake_keypair(name):
return dict(test_keypair.fake_keypair,
name=name, **keypair_data)
def db_key_pair_get_all_by_user(self, user_id):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
return fake_keypair(name=keypair['name'])
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_create_duplicate(context, keypair):
raise exception.KeyPairExists(key_name=keypair.get('name', ''))
class KeypairsTestV21(test.TestCase):
base_url = '/v2/fake'
def _setup_app(self):
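# NOTE: overridden in KeypairsTestV2 below to build the legacy v2 WSGI apps
# instead of the v2.1 one.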
self.app = fakes.wsgi_app_v21(init_only=('os-keypairs', 'servers'))
self.app_server = self.app
def setUp(self):
super(KeypairsTestV21, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Keypairs'])
self._setup_app()
def test_keypair_list(self):
req = webob.Request.blank(self.base_url + '/os-keypairs')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
req = webob.Request.blank(self.base_url + '/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
def _test_keypair_create_bad_request_case(self, body):
req = webob.Request.blank(self.base_url + '/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
self._test_keypair_create_bad_request_case(body)
def test_keypair_create_with_name_too_long(self):
body = {
'keypair': {
'name': 'a' * 256
}
}
self._test_keypair_create_bad_request_case(body)
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
'name': 'test/keypair'
}
}
self._test_keypair_create_bad_request_case(body)
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
self._test_keypair_create_bad_request_case(body)
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
self._test_keypair_create_bad_request_case(body)
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank(self.base_url + '/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
# FIXME(ja): should we check that public_key was sent to create?
res_dict = jsonutils.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertNotIn('private_key', res_dict['keypair'])
def test_keypair_import_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank(self.base_url + '/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 403)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
res_dict['forbidden']['message'])
def test_keypair_create_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
},
}
req = webob.Request.blank(self.base_url + '/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 403)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
res_dict['forbidden']['message'])
def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
body = {'keypair': {'name': 'create_duplicate'}}
req = webob.Request.blank(self.base_url + '/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 409)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Key pair 'create_duplicate' already exists.",
res_dict['conflictingRequest']['message'])
def test_keypair_delete(self):
req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
def test_keypair_get_keypair_not_found(self):
req = webob.Request.blank(self.base_url + '/os-keypairs/DOESNOTEXIST')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get",
db_key_pair_get_not_found)
req = webob.Request.blank(self.base_url + '/os-keypairs/WHAT')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_keypair_show(self):
def _db_key_pair_get(context, user_id, name):
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY')
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
res_dict = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual('foo', res_dict['keypair']['name'])
self.assertEqual('XXX', res_dict['keypair']['public_key'])
self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
def test_keypair_show_not_found(self):
def _db_key_pair_get(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_show_server(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get())
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get())
req = webob.Request.blank(self.base_url + '/servers/1')
req.headers['Content-Type'] = 'application/json'
response = req.get_response(self.app_server)
self.assertEqual(response.status_int, 200)
res_dict = jsonutils.loads(response.body)
self.assertIn('key_name', res_dict['server'])
self.assertEqual(res_dict['server']['key_name'], '')
def test_detail_servers(self):
self.stubs.Set(db, 'instance_get_all_by_filters',
fakes.fake_instance_get_all_by_filters())
req = fakes.HTTPRequest.blank(self.base_url + '/servers/detail')
res = req.get_response(self.app_server)
server_dicts = jsonutils.loads(res.body)['servers']
self.assertEqual(len(server_dicts), 5)
for server_dict in server_dicts:
self.assertIn('key_name', server_dict)
self.assertEqual(server_dict['key_name'], '')
class KeypairPolicyTestV21(test.TestCase):
KeyPairController = keypairs_v21.KeypairController()
policy_path = 'compute_extension:v3:os-keypairs'
base_url = '/v2/fake'
def setUp(self):
super(KeypairPolicyTestV21, self).setUp()
def _db_key_pair_get(context, user_id, name):
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY')
self.stubs.Set(db, "key_pair_get",
_db_key_pair_get)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
def test_keypair_list_fail_policy(self):
rules = {self.policy_path + ':index':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
self.assertRaises(exception.Forbidden,
self.KeyPairController.index,
req)
def test_keypair_list_pass_policy(self):
rules = {self.policy_path + ':index':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
res = self.KeyPairController.index(req)
self.assertIn('keypairs', res)
def test_keypair_show_fail_policy(self):
rules = {self.policy_path + ':show':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
self.assertRaises(exception.Forbidden,
self.KeyPairController.show,
req, 'FAKE')
def test_keypair_show_pass_policy(self):
rules = {self.policy_path + ':show':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
res = self.KeyPairController.show(req, 'FAKE')
self.assertIn('keypair', res)
def test_keypair_create_fail_policy(self):
body = {'keypair': {'name': 'create_test'}}
rules = {self.policy_path + ':create':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
req.method = 'POST'
self.assertRaises(exception.Forbidden,
self.KeyPairController.create,
req, body=body)
def test_keypair_create_pass_policy(self):
body = {'keypair': {'name': 'create_test'}}
rules = {self.policy_path + ':create':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
req.method = 'POST'
res = self.KeyPairController.create(req, body=body)
self.assertIn('keypair', res)
def test_keypair_delete_fail_policy(self):
rules = {self.policy_path + ':delete':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
req.method = 'DELETE'
self.assertRaises(exception.Forbidden,
self.KeyPairController.delete,
req, 'FAKE')
def test_keypair_delete_pass_policy(self):
rules = {self.policy_path + ':delete':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
req.method = 'DELETE'
res = self.KeyPairController.delete(req, 'FAKE')
# NOTE: in v2.1 the HTTP status code is exposed as the wsgi_code attribute
# of the API method instead of as status_int on the response object.
if isinstance(self.KeyPairController, keypairs_v21.KeypairController):
status_int = self.KeyPairController.delete.wsgi_code
else:
status_int = res.status_int
self.assertEqual(202, status_int)
class KeypairsXMLSerializerTest(test.TestCase):
def setUp(self):
super(KeypairsXMLSerializerTest, self).setUp()
self.deserializer = wsgi.XMLDeserializer()
def test_default_serializer(self):
exemplar = dict(keypair=dict(
public_key='fake_public_key',
private_key='fake_private_key',
fingerprint='fake_fingerprint',
user_id='fake_user_id',
name='fake_key_name'))
serializer = keypairs_v2.KeypairTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('keypair', tree.tag)
for child in tree:
self.assertIn(child.tag, exemplar['keypair'])
self.assertEqual(child.text, exemplar['keypair'][child.tag])
def test_index_serializer(self):
exemplar = dict(keypairs=[
dict(keypair=dict(
name='key1_name',
public_key='key1_key',
fingerprint='key1_fingerprint')),
dict(keypair=dict(
name='key2_name',
public_key='key2_key',
fingerprint='key2_fingerprint'))])
serializer = keypairs_v2.KeypairsTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('keypairs', tree.tag)
self.assertEqual(len(exemplar['keypairs']), len(tree))
for idx, keypair in enumerate(tree):
self.assertEqual('keypair', keypair.tag)
kp_data = exemplar['keypairs'][idx]['keypair']
for child in keypair:
self.assertIn(child.tag, kp_data)
self.assertEqual(child.text, kp_data[child.tag])
def test_deserializer(self):
exemplar = dict(keypair=dict(
name='key_name',
public_key='public_key'))
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<keypair><name>key_name</name>'
'<public_key>public_key</public_key></keypair>')
result = self.deserializer.deserialize(intext)['body']
self.assertEqual(result, exemplar)
class KeypairsTestV2(KeypairsTestV21):
def _setup_app(self):
self.app = fakes.wsgi_app(init_only=('os-keypairs',))
self.app_server = fakes.wsgi_app(init_only=('servers',))
class KeypairPolicyTestV2(KeypairPolicyTestV21):
KeyPairController = keypairs_v2.KeypairController()
policy_path = 'compute_extension:keypairs'
import dateutil.parser as dateutil_parser
import hashlib
import json
import logging
import re
import requests
from datetime import date
from datetime import datetime
from datetime import timedelta
from dateutil import tz
from .util import LogProducer
class RefusePickup(LogProducer):
"""Parses a refuse pickup response"""
input_properties = {
'route_garbage': r'garbage pickup route for this location is <strong>(?P<value>[^<]+)</strong>',
'next_pickup_garbage': r'The next garbage collection pickup for this location is: <strong>(?P<value>[^<]+)</strong>',
'route_recycle': r'recycling pickup route for this location is <strong>(?P<value>[^<]+)</strong>',
'next_pickup_recycle': r'The next recycling collection pickup for this location is:\s*<strong>(?P<value>[^<]+)</strong>',
'next_pickup_recycle_after': r'The next estimated pickup time is between <strong>(?P<value>[^<]+)</strong> and <strong>(?P<before>[^<]+)</strong>',
'next_pickup_recycle_before': r'The next estimated pickup time is between <strong>(?P<after>[^<]+)</strong> and <strong>(?P<value>[^<]+)</strong>',
}
"""Maps the key to an attr name & value to a regex search"""
datetime_properties = [
'next_pickup_garbage',
'next_pickup_recycle',
'next_pickup_recycle_after',
'next_pickup_recycle_before',
]
pickup_offset = {
'hours': 8,
'minutes': 0,
}
"""Define what time the refuse must be outside by to make pickup time"""
pickup_tz = 'America/Chicago'
"""Define what timezone the pickup time is at"""
@classmethod
def json_serial(cls, obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
raise TypeError("Type not serializable")
@classmethod
def from_html(cls, html_contents):
log = logging.getLogger(cls.__name__)
log.debug("Parsing {} bytes of HTML".format(len(html_contents)))
# Define TZ processing variables
to_zone = tz.gettz(cls.pickup_tz)
inst = cls()
for attr_name, regex in cls.input_properties.items():
log.debug("Searching for '{n}' with '{p}'".format(
n=attr_name,
p=regex
))
pattern = re.compile(regex)
match = pattern.search(html_contents)
try:
attr_value = match.group('value')
if attr_name in cls.datetime_properties:
log.debug("Parsing datetime({})".format(attr_name))
attr_value = dateutil_parser.parse(attr_value) \
.replace(tzinfo=to_zone) \
+ timedelta(**cls.pickup_offset)
except AttributeError as e:
log.warning("{t} (failed to match): {e}".format(
t=type(e).__name__,
e=e))
# No value was found; fall back to an empty string
attr_value = ''
setattr(inst, attr_name, attr_value)
return inst
def to_dict(self):
"""
Returns pickup information in a JSON blob
:return: JSON blob of pickup data
:rtype: dict
"""
response_dict = {}
for key, value in self.input_properties.items():
key_value = getattr(self, key)
if isinstance(key_value, (datetime, date)):
key_value = key_value.isoformat()
response_dict.update({
key: key_value,
})
return response_dict
def __repr__(self):
return json.dumps(
self.to_dict(),
indent=4,
separators=(',', ': '),
default=self.json_serial)
class RefuseQueryAddress(object):
"""Defines an address to query for refuse pickup scheduling"""
STREET_TYPES = [
'AV', # Avenue
'BL', # Boulevard
'CR', # Circle
'CT', # Court
'DR', # Drive
'LA', # Lane
'PK', # Parkway
'PL', # Place
'RD', # Road
'SQ', # Square
'ST', # Street
'TR', # Terrace
'WY', # Way
]
"""Static list of address suffixes"""
class AddressDirection(object):
N = 'N'
S = 'S'
E = 'E'
W = 'W'
@classmethod
def FromString(cls, direction):
cls.IsValid(direction)
return getattr(
cls,
direction[:1].upper())
@classmethod
def IsValid(cls, direction):
assert hasattr(cls, direction[:1].upper())
def __init__(self, house_number, direction, street_name, street_type):
"""Instantiates a new address
:param house_number: Address number
:type house_number: str
:param direction: Address direction
:type AddressDirection:
"""
self.house_number = house_number
self.direction = self.AddressDirection.FromString(direction)
self._street_name = street_name
self._street_type = street_type
assert self.street_type in self.STREET_TYPES, \
"Invalid street type: {st}".format(
st=self.street_type)
@property
def street_name(self):
return self._street_name.upper()
@property
def street_type(self):
return self._street_type.upper()
def get_hash(self, salt=None):
md5_obj = hashlib.md5()
if salt:
md5_obj.update(salt.encode('utf-8'))
for parameter in ['house_number', 'direction', '_street_name',
'_street_type']:
md5_obj.update(getattr(self, parameter).encode('utf-8'))
return md5_obj.hexdigest()
class RefuseQuery(object):
"""Queries for garbage/recycle pickups based on an address"""
form_url = 'http://mpw.milwaukee.gov/services/garbage_day'
"""URL to POST form data to"""
parse_xpath = RefusePickup
"""Class to parse XHTML response with"""
@classmethod
def Execute(cls, refuse_address, html_output=None):
"""Queries the form URL & processes the response
:param refuse_address: Address to lookup
:type refuse_address: RefuseQueryAddress
:param html_output: Path to file for debugging HTML output
:type html_output: None|str
:return: Parsed response
:rtype: mkerefuse.refuse.RefusePickup
"""
response = requests.post(
cls.form_url,
data={
'laddr': refuse_address.house_number,
'sdir': refuse_address.direction,
'sname': refuse_address.street_name,
'stype': refuse_address.street_type,
'Submit': 'Submit',
})
if html_output is not None:
with open(html_output, 'w') as ofile:
ofile.write(response.text)
response_method = getattr(cls.parse_xpath, 'from_html')
return response_method(response.text)
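# Illustrative usage sketch (not part of the original module); the address
# values below are hypothetical:
#
#   address = RefuseQueryAddress(
#       house_number='1234', direction='N',
#       street_name='Example', street_type='ST')
#   pickup = RefuseQuery.Execute(address, html_output='/tmp/refuse.html')
#   print(pickup.to_dict())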
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import six
import tempfile
import mock
from oslo_config import cfg
import semantic_version
import testtools
from murano.dsl import murano_package as dsl_package
from murano.engine import package_loader
from murano.tests.unit import base
from murano_tempest_tests import utils
CONF = cfg.CONF
class TestPackageCache(base.MuranoTestCase):
def setUp(self):
super(TestPackageCache, self).setUp()
self.location = tempfile.mkdtemp()
CONF.set_override('enable_packages_cache', True, 'engine')
self.old_location = CONF.engine.packages_cache
CONF.set_override('packages_cache', self.location, 'engine')
self.murano_client = mock.MagicMock()
package_loader.ApiPackageLoader.client = self.murano_client
self.loader = package_loader.ApiPackageLoader(None)
def tearDown(self):
CONF.set_override('packages_cache', self.old_location, 'engine')
shutil.rmtree(self.location, ignore_errors=True)
super(TestPackageCache, self).tearDown()
@testtools.skipIf(os.name == 'nt', "Doesn't work on Windows")
def test_load_package(self):
fqn = 'io.murano.apps.test'
path, name = utils.compose_package(
'test',
self.location, archive_dir=self.location)
with open(path, 'rb') as f:
package_data = f.read()
spec = semantic_version.Spec('*')
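# Spec('*') matches any package version; the three ids below simulate three
# successive uploads of the same package FQN.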
first_id, second_id, third_id = '123', '456', '789'
package = mock.MagicMock()
package.fully_qualified_name = fqn
package.id = first_id
package.version = '0.0.1'
self.murano_client.packages.filter = mock.MagicMock(
return_value=[package])
self.murano_client.packages.download = mock.MagicMock(
return_value=package_data)
# load the package
self.loader.load_class_package(fqn, spec)
# assert that everything got created
self.assertTrue(os.path.isdir(os.path.join(
self.location, fqn)))
self.assertTrue(os.path.isdir(os.path.join(
self.location, fqn, package.version)))
self.assertTrue(os.path.isdir(os.path.join(
self.location, fqn, package.version, first_id)))
self.assertTrue(os.path.isfile(os.path.join(
self.location, fqn, package.version, first_id, 'manifest.yaml')))
# assert that download was called
self.assertEqual(self.murano_client.packages.download.call_count, 1)
# now that the cache is in place, call it for the 2nd time
self.loader._package_cache = {}
self.loader._class_cache = {}
self.loader.load_class_package(fqn, spec)
# check that we didn't download a thing
self.assertEqual(self.murano_client.packages.download.call_count, 1)
# changing id, new package would be downloaded.
package.id = second_id
self.loader._package_cache = {}
self.loader._class_cache = {}
self.loader.load_class_package(fqn, spec)
# check that the new package was downloaded
self.assertEqual(self.murano_client.packages.download.call_count, 2)
# check that old directories were not deleted
# we did not call cleanup and did not release the locks
self.assertTrue(os.path.isdir(os.path.join(
self.location, fqn, package.version, first_id)))
# check that new directories got created correctly
self.assertTrue(os.path.isdir(os.path.join(
self.location, fqn)))
self.assertTrue(os.path.isdir(os.path.join(
self.location, fqn, package.version)))
self.assertTrue(os.path.isdir(os.path.join(
self.location, fqn, package.version, second_id)))
self.assertTrue(os.path.isfile(os.path.join(
self.location, fqn, package.version, second_id, 'manifest.yaml')))
self.assertTrue(os.path.isdir(os.path.join(
self.location, fqn, package.version)))
self.assertTrue(os.path.isdir(os.path.join(
self.location, fqn, package.version, second_id)))
# changing id, new package would be downloaded.
package.id = third_id
self.loader._package_cache = {}
self.loader._class_cache = {}
# release all the locks
self.loader.cleanup()
self.loader.load_class_package(fqn, spec)
# check that the new package was downloaded again
self.assertEqual(self.murano_client.packages.download.call_count, 3)
# check that old directories were *deleted*
self.assertFalse(os.path.isdir(os.path.join(
self.location, fqn, package.version, first_id)))
self.assertFalse(os.path.isdir(os.path.join(
self.location, fqn, package.version, second_id)))
# check that new directories got created correctly
self.assertTrue(os.path.isdir(os.path.join(
self.location, fqn, package.version, third_id)))
self.assertTrue(os.path.isfile(os.path.join(
self.location, fqn, package.version, third_id, 'manifest.yaml')))
class TestCombinedPackageLoader(base.MuranoTestCase):
@classmethod
def setUpClass(cls):
super(TestCombinedPackageLoader, cls).setUpClass()
location = os.path.dirname(__file__)
CONF.set_override('load_packages_from', [location], 'engine',
enforce_type=True)
cls.execution_session = mock.MagicMock()
cls.loader = package_loader.CombinedPackageLoader(
cls.execution_session)
cls.api_loader = mock.MagicMock()
cls.loader.api_loader = cls.api_loader
cls.local_pkg_name = 'io.murano.test.MyTest'
cls.api_pkg_name = 'test.mpl.v1.app.Thing'
def test_loaders_initialized(self):
self.assertEqual(1, len(self.loader.directory_loaders),
'One directory class loader should be initialized'
' since there is one valid murano pl package in the'
' provided directory in config.')
self.assertIsInstance(self.loader.directory_loaders[0],
package_loader.DirectoryPackageLoader)
def test_get_package_by_class_directory_loader(self):
spec = semantic_version.Spec('*')
result = self.loader.load_class_package(self.local_pkg_name, spec)
self.assertIsInstance(result, dsl_package.MuranoPackage)
def test_get_package_by_name_directory_loader(self):
spec = semantic_version.Spec('*')
result = self.loader.load_package(self.local_pkg_name, spec)
self.assertIsInstance(result, dsl_package.MuranoPackage)
def test_get_package_by_class_api_loader(self):
spec = semantic_version.Spec('*')
self.loader.load_package(self.api_pkg_name, spec)
self.api_loader.load_package.assert_called_with(
self.api_pkg_name, spec)
def test_get_package_api_loader(self):
spec = semantic_version.Spec('*')
self.loader.load_class_package(self.api_pkg_name, spec)
self.api_loader.load_class_package.assert_called_with(
self.api_pkg_name, spec)
def test_register_package(self):
self.loader.register_package(self.api_pkg_name)
self.api_loader.register_package.assert_called_with(
self.api_pkg_name)
def test_export_fixation_table(self):
test_fixations = {'test_fix_1': ['1.0.0', '1.1.0']}
test_fixations_other = {'test_fix_2': ['2.0.0', '2.1.0']}
expected_fixations = dict(test_fixations, **test_fixations_other)
self.api_loader.export_fixation_table.return_value = test_fixations
self.loader.directory_loaders[0].export_fixation_table =\
mock.MagicMock(return_value=test_fixations_other)
serialized_fixations = self.loader.export_fixation_table()
self.assertEqual(1, self.api_loader.export_fixation_table.call_count)
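# Version lists may come back in arbitrary order, so sort them before comparing
# against the expected fixation table.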
for name, versions in six.iteritems(serialized_fixations):
versions.sort()
self.assertEqual(serialized_fixations, expected_fixations)
def test_import_fixation_table(self):
expected_fixations = {'test_fix_1': ['1.0.0', '1.1.0']}
self.api_loader.import_fixation_table = mock.MagicMock()
self.loader.import_fixation_table(expected_fixations)
self.api_loader.import_fixation_table.assert_called_once_with(
expected_fixations)
for name, versions in six.iteritems(
self.loader.directory_loaders[0]._fixations):
for version in versions:
self.assertIn(str(version), expected_fixations[name])
expected_fixations[name].pop(
expected_fixations[name].index(str(version)))
def test_compact_fixation_table(self):
expected_fixations = {'test_fix_1': ['1.0.0', '1.1.0']}
self.api_loader.import_fixation_table = mock.MagicMock()
self.api_loader.compact_fixation_table = mock.MagicMock()
self.loader.import_fixation_table(expected_fixations)
self.api_loader.import_fixation_table.assert_called_once_with(
expected_fixations)
self.loader.compact_fixation_table()
self.api_loader.compact_fixation_table.assert_called_once_with()
for name, versions in six.iteritems(
self.loader.directory_loaders[0]._fixations):
for version in versions:
self.assertIn(str(version), expected_fixations[name])
expected_fixations[name].pop(
expected_fixations[name].index(str(version)))
class TestApiPackageLoader(base.MuranoTestCase):
@classmethod
def setUpClass(cls):
super(TestApiPackageLoader, cls).setUpClass()
CONF.set_override('auth_uri', 'v3', group='keystone_authtoken')
cls.execution_session = mock.MagicMock()
cls.loader = package_loader.ApiPackageLoader(cls.execution_session)
cls.local_pkg_name = 'io.murano.test.MyTest'
cls.api_pkg_name = 'test.mpl.v1.app.Thing'
def test_client(self):
murano_client = self.loader.client
self.assertIsNotNone(murano_client)
print "importing stuff..."
import numpy as np
import pdb
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pylab as plt
from scipy import special
from .context import aep
from .datautils import step, spiral
def run_regression_1D():
np.random.seed(42)
print "create dataset ..."
N = 200
X = np.random.rand(N, 1)
Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
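# Toy 1D regression data: a sum of two sinusoids plus Gaussian noise.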
# plt.plot(X, Y, 'kx', mew=2)
def plot(m):
xx = np.linspace(-0.5, 1.5, 100)[:, None]
mean, var = m.predict_f(xx)
zu = m.sgp_layers[0].zu
mean_u, var_u = m.predict_f(zu)
plt.figure()
plt.plot(X, Y, 'kx', mew=2)
plt.plot(xx, mean, 'b', lw=2)
plt.fill_between(
xx[:, 0],
mean[:, 0] - 2 * np.sqrt(var[:, 0]),
mean[:, 0] + 2 * np.sqrt(var[:, 0]),
color='blue', alpha=0.2)
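# Mark the pseudo/inducing inputs (zu) of the first sparse GP layer and their
# predictions in red.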
plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
plt.xlim(-0.1, 1.1)
# inference
print "create model and optimize ..."
M = 20
hidden_size = [2]
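# Assumed semantics of the aep API: M is the number of pseudo (inducing) points
# per layer, hidden_size lists the hidden-layer dimensionalities of the deep
# GP, and lik selects the likelihood.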
model = aep.SDGPR_H(X, Y, M, hidden_size, lik='Gaussian')
model.optimise(method='L-BFGS-B', alpha=0.5, maxiter=2000)
plot(model)
plt.show()
plt.savefig('/tmp/aep_dgpr_1D.pdf')
def run_banana():
def gridParams():
mins = [-3.25, -2.85]
maxs = [3.65, 3.4]
nGrid = 50
xspaced = np.linspace(mins[0], maxs[0], nGrid)
yspaced = np.linspace(mins[1], maxs[1], nGrid)
xx, yy = np.meshgrid(xspaced, yspaced)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
return mins, maxs, xx, yy, Xplot
def plot(m):
col1 = '#0172B2'
col2 = '#CC6600'
mins, maxs, xx, yy, Xplot = gridParams()
mf, vf = m.predict_f(Xplot)
plt.figure()
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == 1],
Xtrain[:, 1][Ytrain[:, 0] == 1],
'o', color=col1, mew=0, alpha=0.5)
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == -1],
Xtrain[:, 1][Ytrain[:, 0] == -1],
'o', color=col2, mew=0, alpha=0.5)
zu = m.sgp_layers[0].zu
plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
plt.contour(xx, yy, mf.reshape(*xx.shape),
[0], colors='k', linewidths=1.8, zorder=100)
Xtrain = np.loadtxt('./examples/data/banana_X_train.txt', delimiter=',')
Ytrain = np.loadtxt('./examples/data/banana_Y_train.txt',
delimiter=',').reshape(-1, 1)
Ytrain[np.where(Ytrain == 0)[0]] = -1
M = 30
hidden_size = [2]
model = aep.SDGPR_H(Xtrain, Ytrain, M, hidden_size, lik='Probit')
model.optimise(method='L-BFGS-B', alpha=1.0, maxiter=2000)
plot(model)
plt.show()
plt.savefig('/tmp/aep_dgpc_banana.pdf')
def run_regression_1D_stoc():
np.random.seed(42)
print "create dataset ..."
N = 200
X = np.random.rand(N, 1)
Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
# plt.plot(X, Y, 'kx', mew=2)
def plot(m):
xx = np.linspace(-0.5, 1.5, 100)[:, None]
mean, var = m.predict_f(xx)
zu = m.sgp_layers[0].zu
mean_u, var_u = m.predict_f(zu)
plt.figure()
plt.plot(X, Y, 'kx', mew=2)
plt.plot(xx, mean, 'b', lw=2)
plt.fill_between(
xx[:, 0],
mean[:, 0] - 2 * np.sqrt(var[:, 0]),
mean[:, 0] + 2 * np.sqrt(var[:, 0]),
color='blue', alpha=0.2)
plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
plt.xlim(-0.1, 1.1)
# inference
print "create model and optimize ..."
M = 20
hidden_size = [2]
model = aep.SDGPR(X, Y, M, hidden_size, lik='Gaussian')
model.optimise(method='adam', alpha=1.0,
maxiter=50000, mb_size=M, adam_lr=0.001)
plot(model)
plt.show()
plt.savefig('/tmp/aep_dgpr_1D_stoc.pdf')
def run_banana_stoc():
def gridParams():
mins = [-3.25, -2.85]
maxs = [3.65, 3.4]
nGrid = 50
xspaced = np.linspace(mins[0], maxs[0], nGrid)
yspaced = np.linspace(mins[1], maxs[1], nGrid)
xx, yy = np.meshgrid(xspaced, yspaced)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
return mins, maxs, xx, yy, Xplot
def plot(m):
col1 = '#0172B2'
col2 = '#CC6600'
mins, maxs, xx, yy, Xplot = gridParams()
mf, vf = m.predict_f(Xplot)
plt.figure()
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == 1],
Xtrain[:, 1][Ytrain[:, 0] == 1],
'o', color=col1, mew=0, alpha=0.5)
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == -1],
Xtrain[:, 1][Ytrain[:, 0] == -1],
'o', color=col2, mew=0, alpha=0.5)
zu = m.sgp_layers[0].zu
plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
plt.contour(xx, yy, mf.reshape(*xx.shape),
[0], colors='k', linewidths=1.8, zorder=100)
Xtrain = np.loadtxt('./examples/data/banana_X_train.txt', delimiter=',')
Ytrain = np.loadtxt('./examples/data/banana_Y_train.txt',
delimiter=',').reshape(-1, 1)
Ytrain[np.where(Ytrain == 0)[0]] = -1
M = 30
hidden_size = [2]
model = aep.SDGPR(Xtrain, Ytrain, M, hidden_size, lik='Probit')
mb_size = int(Xtrain.shape[0] / 4)
model.optimise(method='adam', alpha=1.0, maxiter=100000,
mb_size=mb_size, adam_lr=0.001)
plot(model)
plt.show()
plt.savefig('/tmp/aep_dgpc_banana_stoc.pdf')
def run_step_1D():
np.random.seed(42)
def step(x):
y = x.copy()
y[y < 0.0] = 0.0
y[y > 0.0] = 1.0
return y + 0.05 * np.random.randn(x.shape[0], 1)
print "create dataset ..."
N = 100
X = np.random.rand(N, 1) * 3 - 1.5
Y = step(X)
# plt.plot(X, Y, 'kx', mew=2)
def plot(m):
xx = np.linspace(-3, 3, 100)[:, None]
mean, var = m.predict_f(xx)
zu = m.sgp_layers[0].zu
mean_u, var_u = m.predict_f(zu)
plt.figure()
plt.plot(X, Y, 'kx', mew=2)
plt.plot(xx, mean, 'b', lw=2)
plt.fill_between(
xx[:, 0],
mean[:, 0] - 2 * np.sqrt(var[:, 0]),
mean[:, 0] + 2 * np.sqrt(var[:, 0]),
color='blue', alpha=0.2)
plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
no_samples = 20
xx = np.linspace(-3, 3, 500)[:, None]
f_samples = m.sample_f(xx, no_samples)
for i in range(no_samples):
plt.plot(xx, f_samples[:, :, i], linewidth=0.5, alpha=0.5)
plt.xlim(-3, 3)
# inference
print "create model and optimize ..."
M = 20
hidden_size = [3, 2]
model = aep.SDGPR_H(X, Y, M, hidden_size, lik='Gaussian')
model.optimise(method='L-BFGS-B', alpha=1, maxiter=1000)
plot(model)
plt.show()
if __name__ == '__main__':
# run_regression_1D()
# run_banana()
run_step_1D()
# run_regression_1D_stoc()
# run_banana_stoc()
from __future__ import (absolute_import, division, print_function)
import numpy as np
from qtpy.QtWidgets import QMainWindow
from addie.utilities import load_ui
from addie.processing.mantid.master_table.table_row_handler import \
TableRowHandler
from addie.utilities import math_tools
from addie.processing.mantid.master_table.tree_definition import \
INDEX_OF_COLUMNS_WITH_MASS_DENSITY
from addie.processing.mantid.master_table.periodic_table.material_handler import \
retrieving_molecular_mass_and_number_of_atoms_worked
class MassDensityHandler:
def __init__(self, parent=None, key=None, data_type='sample'):
if parent.mass_density_ui is None:
o_mass = MassDensityWindow(
parent=parent, key=key, data_type=data_type)
parent.mass_density_ui = o_mass
if parent.mass_density_ui_position:
parent.mass_density_ui.move(parent.mass_density_ui_position)
o_mass.show()
else:
parent.mass_density_ui.setFocus()
parent.mass_density_ui.activateWindow()
class MassDensityWindow(QMainWindow):
chemical_formula_defined = False
geometry_dimensions_defined = False
total_number_of_atoms = np.NaN
total_molecular_mass = np.NaN
column = 0
precision = 5
def __init__(self, parent=None, key=None, data_type='sample'):
self.parent = parent
self.key = key
self.data_type = data_type
QMainWindow.__init__(self, parent=parent)
self.ui = load_ui('mass_density.ui', baseinstance=self)
self.init_widgets()
self.set_column_index()
def _to_precision_string(self, value):
return "{:.{num}f}".format(value, num=self.precision)
def set_column_index(self):
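# The master table stores mass density in a different column for 'sample' rows
# than for the other data type; pick the index that matches data_type.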
self.column = INDEX_OF_COLUMNS_WITH_MASS_DENSITY[0] if self.data_type == 'sample' else \
INDEX_OF_COLUMNS_WITH_MASS_DENSITY[1]
def init_widgets(self):
self.ui.number_density_units.setText(u"Atoms/\u212B\u00B3")
self.ui.mass_density_label.setText(u"g/cm\u00B3")
self.ui.volume_units.setText(u"cm\u00B3")
self.ui.ok.setEnabled(False)
# error messages
self.ui.mass_density_error_message.setStyleSheet("color: red")
self.ui.number_density_error_message.setStyleSheet("color: red")
self.ui.mass_error_message.setStyleSheet("color: red")
# geometry
geometry = str(
self.parent.master_table_list_ui[self.key][self.data_type]['shape'].currentText())
self.ui.geometry_label.setText(geometry)
self.geometry_dimensions_defined = self._is_geometry_dimensions_defined()
if self.geometry_dimensions_defined:
self._calculate_and_display_geometry_volume()
self.chemical_formula_defined = self._is_chemical_formula_defined()
if self.chemical_formula_defined:
chemical_formula = self._get_chemical_formula()
molecular_mass, total_number_of_atoms = retrieving_molecular_mass_and_number_of_atoms_worked(
chemical_formula)
self.total_molecular_mass = molecular_mass
self.total_number_of_atoms = total_number_of_atoms
mass_density_list_ui = self.parent.master_table_list_ui[self.key][self.data_type]
mass_density_infos = mass_density_list_ui['mass_density_infos']
_mass_density = str(
mass_density_list_ui['mass_density']['text'].text())
self.ui.mass_density_line_edit.setText(_mass_density)
_mass_density_checked = mass_density_infos['mass_density']['selected']
_number_density_checked = mass_density_infos['number_density']['selected']
_number_density = mass_density_infos['number_density']['value']
self.ui.number_density_line_edit.setText(_number_density)
_mass_value = mass_density_infos['mass']['value']
self.ui.mass_line_edit.setText(_mass_value)
if _mass_density_checked:
self.ui.mass_density_radio_button.setChecked(True)
elif _number_density_checked:
self.ui.number_density_radio_button.setChecked(True)
else:
self.ui.mass_geometry_radio_button.setChecked(True)
self.radio_button_changed()
def _get_chemical_formula(self):
return self.parent.master_table_list_ui[self.key][self.data_type]['material']['text'].text(
)
def _is_chemical_formula_defined(self):
chemical_formula = self._get_chemical_formula()
if chemical_formula == "" or chemical_formula == 'N/A':
return False
return True
def _is_geometry_dimensions_defined(self):
geometry_defined = str(self.ui.geometry_label.text())
radius = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['radius']['value'].text())
radius2 = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['radius2']['value'].text())
height = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['height']['value'].text())
if geometry_defined.lower() == 'cylinder':
if math_tools.is_number(radius) and math_tools.is_number(height):
return True
elif geometry_defined.lower() == 'sphere':
if math_tools.is_number(radius):
return True
else:
if math_tools.is_number(radius) and math_tools.is_number(
radius2) and math_tools.is_number(height):
return True
return False
def _calculate_and_display_geometry_volume(self):
geometry_defined = str(self.ui.geometry_label.text())
radius = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['radius']['value'].text())
radius2 = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['radius2']['value'].text())
height = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['height']['value'].text())
# construct geometry object
geom = {
'Shape': geometry_defined,
'Radius': radius,
'Radius2': radius2,
'Height': height
}
volume = math_tools.get_volume_from_geometry(geom)
str_volume = "{:.4}".format(volume)
self.ui.volume_label.setText(str_volume)
def mass_density_value_changed(self):
        mass_density = float(self.ui.mass_density_line_edit.text())
# calculate number density if chemical formula defined
if self.chemical_formula_defined:
natoms = self.total_number_of_atoms
molecular_mass = self.total_molecular_mass
number_density = math_tools.mass_density2number_density(
mass_density, natoms, molecular_mass)
number_density = self._to_precision_string(number_density)
else:
number_density = 'N/A'
# calculate mass if geometry defined
if self.geometry_dimensions_defined:
            volume = float(self.ui.volume_label.text())
mass = math_tools.mass_density2mass(mass_density, volume)
mass = self._to_precision_string(mass)
else:
mass = 'N/A'
self.ui.number_density_line_edit.setText(number_density)
self.ui.mass_line_edit.setText(mass)
self.update_status_of_save_button()
def number_density_value_changed(self):
        number_density = float(self.ui.number_density_line_edit.text())
# calculate mass density if chemical formula defined
if self.chemical_formula_defined:
natoms = self.total_number_of_atoms
molecular_mass = self.total_molecular_mass
mass_density = math_tools.number_density2mass_density(
number_density, natoms, molecular_mass)
mass_density = self._to_precision_string(mass_density)
# calculate mass if geometry defined
if self.geometry_dimensions_defined:
                volume = float(self.ui.volume_label.text())
mass = math_tools.number_density2mass(
number_density, volume, natoms, molecular_mass)
mass = self._to_precision_string(mass)
else:
mass = 'N/A'
else:
mass_density = 'N/A'
mass = 'N/A'
self.ui.mass_density_line_edit.setText(mass_density)
self.ui.mass_line_edit.setText(mass)
self.update_status_of_save_button()
def mass_value_changed(self):
        mass = float(self.ui.mass_line_edit.text())
        # calculate mass density if geometry defined
        if self.geometry_dimensions_defined:
            volume = float(self.ui.volume_label.text())
mass_density = math_tools.mass2mass_density(mass, volume)
mass_density = self._to_precision_string(mass_density)
            # calculate number density if chemical formula defined
if self.chemical_formula_defined:
natoms = self.total_number_of_atoms
molecular_mass = self.total_molecular_mass
number_density = math_tools.mass2number_density(
mass, volume, natoms, molecular_mass)
number_density = self._to_precision_string(number_density)
else:
number_density = "N/A"
else:
mass_density = "N/A"
number_density = "N/A"
self.ui.mass_density_line_edit.setText(mass_density)
self.ui.number_density_line_edit.setText(number_density)
self.update_status_of_save_button()
def radio_button_changed(self):
mass_density_line_edit_status = False
number_density_line_edit_status = False
mass_line_edit_status = False
if self.ui.mass_density_radio_button.isChecked():
self.ui.mass_density_error_message.setVisible(False)
self.ui.number_density_error_message.setVisible(
not self.chemical_formula_defined)
self.ui.mass_error_message.setVisible(
not self.geometry_dimensions_defined)
mass_density_line_edit_status = True
elif self.ui.number_density_radio_button.isChecked():
self.ui.mass_density_error_message.setVisible(
not self.chemical_formula_defined)
self.ui.number_density_error_message.setVisible(False)
self.ui.mass_error_message.setVisible(
not self.chemical_formula_defined and not self.geometry_dimensions_defined)
number_density_line_edit_status = True
else:
self.ui.mass_density_error_message.setVisible(
not self.geometry_dimensions_defined)
self.ui.number_density_error_message.setVisible(
not self.chemical_formula_defined and not self.geometry_dimensions_defined)
self.ui.mass_error_message.setVisible(
not self.geometry_dimensions_defined)
mass_line_edit_status = True
self.ui.mass_line_edit.setEnabled(mass_line_edit_status)
self.ui.number_density_line_edit.setEnabled(
number_density_line_edit_status)
self.ui.mass_density_line_edit.setEnabled(
mass_density_line_edit_status)
self.update_status_of_save_button()
def update_status_of_save_button(self):
# check the active radio button and check if value is there to enable
# save button
enabled_save_button = False
if self.ui.mass_density_radio_button.isChecked():
string_value = str(self.ui.mass_density_line_edit.text())
if math_tools.is_number(string_value):
enabled_save_button = True
elif self.ui.number_density_radio_button.isChecked():
string_value = str(self.ui.number_density_line_edit.text())
if math_tools.is_number(string_value):
enabled_save_button = True
else:
string_value = str(self.ui.mass_line_edit.text())
if math_tools.is_number(string_value) and self.chemical_formula_defined and \
self.geometry_dimensions_defined:
enabled_save_button = True
self.ui.ok.setEnabled(enabled_save_button)
def save(self):
# first validate fields in case user forgot to hit enter before leaving
# window
if self.ui.mass_density_radio_button.isChecked():
self.mass_density_value_changed()
elif self.ui.number_density_radio_button.isChecked():
self.number_density_value_changed()
else:
self.mass_value_changed()
mass_density_list_ui = self.parent.master_table_list_ui[self.key][self.data_type]
mass_density_infos = mass_density_list_ui['mass_density_infos']
mass_density_flag = False
number_density_flag = False
mass_flag = False
if self.ui.mass_density_radio_button.isChecked():
mass_density_flag = True
elif self.ui.number_density_radio_button.isChecked():
number_density_flag = True
else:
mass_flag = True
mass_density = str(self.ui.mass_density_line_edit.text())
mass_density_list_ui['mass_density']['text'].setText(mass_density)
mass_density_infos['mass_density']['value'] = mass_density
mass_density_infos['mass_density']['selected'] = mass_density_flag
number_density = str(self.ui.number_density_line_edit.text())
mass_density_infos['number_density']['value'] = number_density
mass_density_infos['number_density']['selected'] = number_density_flag
mass = str(self.ui.mass_line_edit.text())
mass_density_infos['mass']['value'] = mass
mass_density_infos['mass']['selected'] = mass_flag
def accept(self):
self.save()
o_table = TableRowHandler(main_window=self.parent)
o_table.transfer_widget_states(
from_key=self.key, data_type=self.data_type)
self.parent.check_master_table_column_highlighting(column=self.column)
self.close()
def reject(self):
self.close()
def closeEvent(self, c):
self.parent.mass_density_ui = None
self.parent.mass_density_ui_position = self.pos()
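# Note: the conversions used above come from addie.utilities.math_tools. As a
# point of reference only, a minimal sketch of the mass-density -> number-density
# relation (names and constants below are illustrative, not addie's API):
#   number_density [atoms/Angstrom^3] =
#       mass_density [g/cm^3] * natoms * N_A / (molecular_mass [g/mol] * 1e24)
def _example_mass_density2number_density(mass_density, natoms, molecular_mass):
    """Illustrative helper only; addie's math_tools provides the real one."""
    avogadro = 6.02214076e23  # atoms per mole
    angstrom3_per_cm3 = 1.0e24  # Angstrom^3 in one cm^3
    return mass_density * natoms * avogadro / (
        molecular_mass * angstrom3_per_cm3)
# e.g. water (H2O: 3 atoms, 18.015 g/mol) at 1.0 g/cm^3 -> ~0.1003 atoms/Angstrom^3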
|
|
### GRAPH ############################################################################################
# The NodeBox Graph library includes algorithms from NetworkX for
# betweenness centrality and eigenvector centrality, Connelly Barnes' implementation of
# Dijkstra shortest paths (here) and the spring layout for JavaScript by Aslak Hellesoy
# and Dave Hoover (here). The goal of this library is visualization of small graphs (<200 elements),
# if you need something more robust we recommend using NetworkX.
### CREDITS ##########################################################################################
# Copyright (c) 2008 Tom De Smedt.
# See LICENSE.txt for details.
__author__ = "Tom De Smedt"
__version__ = "1.9.5.6"
__copyright__ = "Copyright (c) 2008 Tom De Smedt"
__license__ = "GPL"
######################################################################################################
import graph.cluster as cluster
import graph.event as event
import graph.layout as layout
import graph.proximity as proximity
import graph.style as style
#### GRAPH NODE ######################################################################################
class node:
def __init__(self, graph, id="", radius=8, style=style.DEFAULT, category="", label=None,
properties={}):
""" A node with a unique id in the graph.
Its position is calculated by graph.layout.
The node's radius and style define how it looks onscreen.
"""
self.graph = graph
self.id = id
self.category = category
self.label = label or self.id
self.links = links()
self.vx = 0
self.vy = 0
self.force = layout.Point(0, 0)
self.r = radius
self.style = style
self._visited = False
self._betweenness = None
self._eigenvalue = None
for k, v in properties.items():
if not k in self.__dict__:
self.__dict__[k] = v
def _edges(self):
return self.links._edges.values()
edges = property(_edges)
def _is_leaf(self):
return len(self.links) == 1
is_leaf = property(_is_leaf)
def can_reach(self, node, traversable=lambda node, edge: True):
""" Returns True if given node can be reached over traversable edges.
To enforce edge direction, use a node==edge.node1 traversable.
"""
if isinstance(node, str):
node = self.graph[node]
for n in self.graph.nodes:
n._visited = False
return proximity.depth_first_search(self,
visit=lambda n: node == n,
traversable=traversable
)
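    # Usage sketch (assuming "a" and "b" are node ids in graph g):
    #   g["a"].can_reach("b", traversable=lambda n, e: e.node1 == n)
    # restricts the search to edges leaving each visited node, i.e. it enforces
    # edge direction as described in the docstring above.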
def _get_betweenness(self):
        if self._betweenness is None:
self.graph.betweenness_centrality()
return self._betweenness
betweenness = property(_get_betweenness)
traffic = betweenness
def _get_eigenvalue(self):
        if self._eigenvalue is None:
self.graph.eigenvector_centrality()
return self._eigenvalue
eigenvalue = property(_get_eigenvalue)
weight = eigenvalue
def _x(self): return self.vx * self.graph.d
def _y(self): return self.vy * self.graph.d
x = property(_x)
y = property(_y)
def __contains__(self, pt):
""" True if pt.x, pt.y is inside the node's absolute position.
"""
if abs(self.graph.x+self.x-pt.x) < self.r*2 and \
abs(self.graph.y+self.y-pt.y) < self.r*2:
return True
else:
return False
def flatten(self, distance=1):
return cluster.flatten(self, distance)
def __and__(self, node, distance=1):
return cluster.intersection(
self.flatten(distance), node.flatten(distance))
def __or__(self, node, distance=1):
return cluster.union(
self.flatten(distance), node.flatten(distance))
def __sub__(self, node, distance=1):
return cluster.difference(
self.flatten(distance), node.flatten(distance))
def __repr__(self):
try: return "<"+str(self.id)+" node>"
except:
return "<"+self.id.encode("utf-8")+" node>"
def __str__(self):
try: return str(self.id)
except:
return self.id.encode("utf-8")
def __eq__(self, node):
if not isinstance(node, self.__class__): return False
return self.id == node.id
#### GRAPH NODE LINKS ################################################################################
class links(list):
""" A list in which each node has an associated edge.
The edge() method returns the edge for a given node id.
"""
def __init__(self):
self._edges = dict()
def append(self, node, edge=None):
if edge: self._edges[node.id] = edge
list.append(self, node)
def remove(self, node):
if node.id in self._edges:
del self._edges[node.id]
list.remove(self, node)
def edge(self, id):
if isinstance(id, node): id = id.id
return self._edges[id]
##### GRAPH EDGE #####################################################################################
class edge(object):
def __init__(self, node1, node2, weight=0.0, length=1.0, label="", properties={}):
self.node1 = node1
self.node2 = node2
self.weight = weight
self.length = length
self.label = label
for k, v in properties.items():
if not k in self.__dict__:
self.__dict__[k] = v
def _get_length(self):
return self._length
def _set_length(self, v):
self._length = max(0.1, v)
length = property(_get_length, _set_length)
#### GRAPH ###########################################################################################
LAYOUT_CIRCLE = "circle"
LAYOUT_SPRING = "spring"
layout_ = layout # there's also a "layout" parameter in graph.__init__()
class graph(dict):
def __init__(self, iterations=1000, distance=1.0, layout=LAYOUT_SPRING):
super().__init__()
self.__initialised = True
self.nodes = []
self.edges = []
self.root = None
# Calculates positions for nodes.
self.layout = layout_.__dict__[layout+"_layout"](self, iterations)
self.d = node(None).r * 2.5 * distance
# Hover, click and drag event handler.
self.events = event.events(self, _ctx)
# Enhanced dictionary of all styles.
self.styles = style.styles(self)
self.styles.append(style.style(style.DEFAULT, _ctx))
self.alpha = 0
# Try to specialize intensive math operations.
try:
import psyco
psyco.bind(self.layout._bounds)
psyco.bind(self.layout.iterate)
psyco.bind(self.__or__)
psyco.bind(cluster.flatten)
psyco.bind(cluster.subgraph)
psyco.bind(cluster.clique)
psyco.bind(cluster.partition)
psyco.bind(proximity.dijkstra_shortest_path)
psyco.bind(proximity.brandes_betweenness_centrality)
psyco.bind(proximity.eigenvector_centrality)
psyco.bind(style.edge_arrow)
psyco.bind(style.edge_label)
#print "using psyco"
except:
pass
def _get_distance(self):
return self.d / (node(None).r * 2.5)
def _set_distance(self, value):
self.d = node(None).r * 2.5 * value
distance = property(_get_distance, _set_distance)
def copy(self, empty=False):
""" Create a copy of the graph (by default with nodes and edges).
"""
g = graph(self.layout.n, self.distance, self.layout.type)
g.layout = self.layout.copy(g)
g.styles = self.styles.copy(g)
g.events = self.events.copy(g)
if not empty:
for n in self.nodes:
g.add_node(n.id, n.r, n.style, n.category, n.label, (n == self.root), n.__dict__)
for e in self.edges:
g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)
return g
def clear(self):
""" Remove nodes and edges and reset the layout.
"""
dict.clear(self)
self.nodes = []
self.edges = []
self.root = None
self.layout.i = 0
self.alpha = 0
def new_node(self, *args, **kwargs):
""" Returns a node object; can be overloaded when the node class is subclassed.
"""
return node(*args, **kwargs)
def new_edge(self, *args, **kwargs):
""" Returns an edge object; can be overloaded when the edge class is subclassed.
"""
return edge(*args, **kwargs)
def add_node(self, id, radius=8, style=style.DEFAULT, category="", label=None, root=False,
properties={}):
""" Add node from id and return the node object.
"""
if id in self:
return self[id]
if not isinstance(style, str) and "name" in style.__dict__:
style = style.name
n = self.new_node(self, id, radius, style, category, label, properties)
self[n.id] = n
self.nodes.append(n)
if root: self.root = n
return n
def add_nodes(self, nodes):
""" Add nodes from a list of id's.
"""
try: [self.add_node(n) for n in nodes]
except:
pass
def add_edge(self, id1, id2, weight=0.0, length=1.0, label="", properties={}):
""" Add weighted (0.0-1.0) edge between nodes, creating them if necessary.
The weight represents the importance of the connection (not the cost).
"""
if id1 == id2: return None
if not id1 in self:
self.add_node(id1)
if not id2 in self:
self.add_node(id2)
n1 = self[id1]
n2 = self[id2]
# If a->b already exists, don't re-create it.
# However, b->a may still pass.
if n1 in n2.links:
if n2.links.edge(n1).node1 == n1:
return self.edge(id1, id2)
weight = max(0.0, min(weight, 1.0))
e = self.new_edge(n1, n2, weight, length, label, properties)
self.edges.append(e)
n1.links.append(n2, e)
n2.links.append(n1, e)
return e
def remove_node(self, id):
""" Remove node with given id.
"""
if id in self:
n = self[id]
self.nodes.remove(n)
del self[id]
# Remove all edges involving id and all links to it.
for e in list(self.edges):
if n in (e.node1, e.node2):
if n in e.node1.links:
e.node1.links.remove(n)
if n in e.node2.links:
e.node2.links.remove(n)
self.edges.remove(e)
def remove_edge(self, id1, id2):
""" Remove edges between nodes with given id's.
"""
for e in list(self.edges):
if id1 in (e.node1.id, e.node2.id) and \
id2 in (e.node1.id, e.node2.id):
e.node1.links.remove(e.node2)
e.node2.links.remove(e.node1)
self.edges.remove(e)
def node(self, id):
""" Returns the node in the graph associated with the given id.
"""
if id in self:
return self[id]
return None
def edge(self, id1, id2):
""" Returns the edge between the nodes with given id1 and id2.
"""
if id1 in self and \
id2 in self and \
self[id2] in self[id1].links:
return self[id1].links.edge(id2)
return None
def __getattr__(self, a):
""" Returns the node in the graph associated with the given id.
"""
try:
return self.__getitem__(a)
except:
raise AttributeError("graph object has no attribute '"+str(a)+"'")
def update(self, iterations=10):
""" Iterates the graph layout and updates node positions.
"""
# The graph fades in when initially constructed.
self.alpha += 0.05
self.alpha = min(self.alpha, 1.0)
# Iterates over the graph's layout.
# Each step the graph's bounds are recalculated
# and a number of iterations are processed,
# more and more as the layout progresses.
if self.layout.i == 0:
self.layout.prepare()
self.layout.i += 1
elif self.layout.i == 1:
self.layout.iterate()
elif self.layout.i < self.layout.n:
            n = min(iterations, self.layout.i // 10 + 1)
for i in range(n):
self.layout.iterate()
# Calculate the absolute center of the graph.
        min_, max_ = self.layout.bounds
        self.x = _ctx.WIDTH - max_.x*self.d - min_.x*self.d
        self.y = _ctx.HEIGHT - max_.y*self.d - min_.y*self.d
self.x /= 2
self.y /= 2
return not self.layout.done
def solve(self):
""" Iterates the graph layout until done.
"""
self.layout.solve()
self.alpha = 1.0
def _done(self):
return self.layout.done
done = property(_done)
def offset(self, node):
""" Returns the distance from the center to the given node.
"""
x = self.x + node.x - _ctx.WIDTH/2
y = self.y + node.y - _ctx.HEIGHT/2
return x, y
def draw(self, dx=0, dy=0, weighted=False, directed=False, highlight=[], traffic=None):
""" Layout the graph incrementally.
The graph is drawn at the center of the canvas.
The weighted and directed parameters visualize edge weight and direction.
        The highlight parameter specifies a list of connected nodes.
The path will be colored according to the "highlight" style.
Clicking and dragging events are monitored.
"""
self.update()
# Draw the graph background.
s = self.styles.default
s.graph_background(s)
# Center the graph on the canvas.
_ctx.push()
_ctx.translate(self.x+dx, self.y+dy)
# Indicate betweenness centrality.
if traffic:
if isinstance(traffic, bool):
traffic = 5
for n in self.nodes_by_betweenness()[:traffic]:
try:
s = self.styles[n.style]
except Exception:
s = self.styles.default
if s.graph_traffic:
s.graph_traffic(s, n, self.alpha)
# Draw the edges and their labels.
s = self.styles.default
if s.edges:
s.edges(s, self.edges, self.alpha, weighted, directed)
# Draw each node in the graph.
# Apply individual style to each node (or default).
for n in self.nodes:
try:
s = self.styles[n.style]
except Exception:
s = self.styles.default
if s.node:
s.node(s, n, self.alpha)
# Highlight the given shortest path.
try:
s = self.styles.highlight
except Exception:
s = self.styles.default
if s.path:
s.path(s, self, highlight)
# Draw node id's as labels on each node.
for n in self.nodes:
try:
s = self.styles[n.style]
except Exception:
s = self.styles.default
if s.node_label:
s.node_label(s, n, self.alpha)
# Events for clicked and dragged nodes.
# Nodes will resist being dragged by attraction and repulsion,
# put the event listener on top to get more direct feedback.
self.events.update()
_ctx.pop()
def prune(self, depth=0):
""" Removes all nodes with less or equal links than depth.
"""
for n in list(self.nodes):
if len(n.links) <= depth:
self.remove_node(n.id)
trim = prune
def shortest_path(self, id1, id2, heuristic=None, directed=False):
""" Returns a list of node id's connecting the two nodes.
"""
try:
return proximity.dijkstra_shortest_path(self, id1, id2, heuristic, directed)
except Exception:
return None
def betweenness_centrality(self, normalized=True, directed=False):
""" Calculates betweenness centrality and returns an node id -> weight dictionary.
Node betweenness weights are updated in the process.
"""
bc = proximity.brandes_betweenness_centrality(self, normalized, directed)
        for id, w in bc.items():
self[id]._betweenness = w
return bc
def eigenvector_centrality(self, normalized=True, reversed=True, rating={},
start=None, iterations=100, tolerance=0.0001):
""" Calculates eigenvector centrality and returns an node id -> weight dictionary.
Node eigenvalue weights are updated in the process.
"""
ec = proximity.eigenvector_centrality(
self, normalized, reversed, rating, start, iterations, tolerance
)
        for id, w in ec.items():
self[id]._eigenvalue = w
return ec
def nodes_by_betweenness(self, treshold=0.0):
""" Returns nodes sorted by betweenness centrality.
Nodes with a lot of passing traffic will be at the front of the list.
"""
nodes = [(n.betweenness, n) for n in self.nodes if n.betweenness > treshold]
nodes.sort()
nodes.reverse()
return [n for w, n in nodes]
nodes_by_traffic = nodes_by_betweenness
def nodes_by_eigenvalue(self, treshold=0.0):
""" Returns nodes sorted by eigenvector centrality.
Nodes with a lot of incoming traffic will be at the front of the list
"""
nodes = [(n.eigenvalue, n) for n in self.nodes if n.eigenvalue > treshold]
nodes.sort()
nodes.reverse()
return [n for w, n in nodes]
nodes_by_weight = nodes_by_eigenvalue
def nodes_by_category(self, category):
""" Returns nodes with the given category attribute.
"""
return [n for n in self.nodes if n.category == category]
def _leaves(self):
""" Returns a list of nodes that have only one connection.
"""
return [node for node in self.nodes if node.is_leaf]
leaves = property(_leaves)
def crown(self, depth=2):
""" Returns a list of leaves, nodes connected to leaves, etc.
"""
nodes = []
for node in self.leaves:
nodes += node.flatten(depth-1)
return cluster.unique(nodes)
fringe = crown
def _density(self):
""" The number of edges in relation to the total number of possible edges.
"""
return 2.0*len(self.edges) / (len(self.nodes) * (len(self.nodes)-1))
density = property(_density)
def _is_complete(self):
return self.density == 1.0
def _is_dense(self):
return self.density > 0.65
def _is_sparse(self):
return self.density < 0.35
is_complete = property(_is_complete)
is_dense = property(_is_dense)
is_sparse = property(_is_sparse)
def sub(self, id, distance=1):
return cluster.subgraph(self, id, distance)
subgraph = sub
def __and__(self, graph):
nodes = cluster.intersection(cluster.flatten(self), cluster.flatten(graph))
all = self | graph
return cluster.subgraph(all, nodes, 0)
intersect = __and__
def __or__(self, graph):
g = self.copy()
for n in graph.nodes:
root = (g.root is None and graph.root==n)
g.add_node(n.id, n.r, n.style, n.category, n.label, root, n.__dict__)
for e in graph.edges:
g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)
return g
join = __or__
def __sub__(self, graph):
nodes = cluster.difference(cluster.flatten(self), cluster.flatten(graph))
all = self | graph
return cluster.subgraph(all, nodes, 0)
subtract = __sub__
def _is_clique(self):
return cluster.is_clique(self)
is_clique = property(_is_clique)
def clique(self, id, distance=0):
return cluster.subgraph(self, cluster.clique(self, id), distance)
def cliques(self, threshold=3, distance=0):
g = []
c = cluster.cliques(self, threshold)
for nodes in c:
g.append(cluster.subgraph(self, nodes, distance))
return g
def split(self):
return cluster.partition(self)
# DYNAMIC GRAPH ####################################################################################
class xgraph(graph):
""" A dynamic graph where a clicked node loads new data.
Nodes are clickable and will load a new graph based on
the following methods (that need to be subclassed or monkey patched):
1) has_node(id): returns True when the id is a node in the dataset.
2) get_links(id): a list of (weight, id) tuples directly connected to the node
3) get_cluster(id): a list of (weight, id, [links]) tuples of node id's that are
connected to the given node via the node id's in the links list (distance 2).
The idea is that you have a dataset stored in files or a database,
and use the dynamic graph's method to describe how the data is read
and interlinked. The graph is then automatically kept up to date
as you browse through the connected nodes.
"""
def __init__(self, iterations=500, distance=1.0, layout=LAYOUT_SPRING):
super().__init__(iterations, distance, layout)
self.styles = create().styles
self.events.click = self.click
self.max = 20
self._dx = 0
self._dy = 0
def has_node(self, id):
return True
def get_links(self, id):
return []
def get_cluster(self, id):
return []
def load(self, id):
""" Rebuilds the graph around the given node id.
"""
self.clear()
# Root node.
self.add_node(id, root=True)
# Directly connected nodes have priority.
for w, id2 in self.get_links(id):
self.add_edge(id, id2, weight=w)
if len(self) > self.max:
break
# Now get all the other nodes in the cluster.
for w, id2, links in self.get_cluster(id):
for id3 in links:
self.add_edge(id3, id2, weight=w)
self.add_edge(id, id3, weight=w)
#if len(links) == 0:
# self.add_edge(id, id2)
if len(self) > self.max:
break
# Provide a backlink to the previous root.
        if self.events.clicked:
            self.add_node(self.events.clicked)
def click(self, node):
""" Callback from graph.events when a node is clicked.
"""
if not self.has_node(node.id):
return
if node == self.root:
return
self._dx, self._dy = self.offset(node)
self.previous = self.root.id
self.load(node.id)
def draw(self, weighted=False, directed=False, highlight=[], traffic=None):
# A new graph unfolds from the position of the clicked node.
graph.draw(self, self._dx, self._dy,
weighted, directed, highlight, traffic
)
self._dx *= 0.9
self._dy *= 0.9
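# A minimal sketch (not part of this library) of wiring the three hooks the
# xgraph docstring describes to a hypothetical in-memory dataset of the form
# {id: [(weight, other_id), ...]}. Drawing still needs a NodeBox context (_ctx),
# so this is illustrative only.
def _example_wire_xgraph(data):
    g = xgraph()
    g.has_node = lambda id: id in data
    g.get_links = lambda id: data.get(id, [])
    g.get_cluster = lambda id: []  # no second-degree neighbours in this sketch
    return g  # g.load(some_id) then rebuilds the graph around that node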
# COMMANDS ########################################################################################
def create(iterations=1000, distance=1.0, layout=LAYOUT_SPRING, depth=True):
""" Returns a new graph with predefined styling.
"""
global _ctx
try:
from nodebox.graphics import RGB
_ctx.colormode(RGB)
g = graph(iterations, distance, layout)
except Exception:
_ctx = None
g = graph(iterations, distance, layout)
return g
# Styles for different types of nodes.
s = style.style
g.styles.append(s(style.LIGHT , _ctx, fill = _ctx.color(0.0, 0.0, 0.0, 0.20)))
g.styles.append(s(style.DARK , _ctx, fill = _ctx.color(0.3, 0.5, 0.7, 0.75)))
g.styles.append(s(style.BACK , _ctx, fill = _ctx.color(0.5, 0.8, 0.0, 0.50)))
g.styles.append(s(style.IMPORTANT, _ctx, fill = _ctx.color(0.3, 0.6, 0.8, 0.75)))
g.styles.append(s(style.HIGHLIGHT, _ctx, stroke = _ctx.color(1.0, 0.0, 0.5), strokewidth=1.5))
g.styles.append(s(style.MARKED , _ctx))
g.styles.append(s(style.ROOT , _ctx, text = _ctx.color(1.0, 0.0, 0.4, 1.00),
stroke = _ctx.color(0.8, 0.8, 0.8, 0.60),
strokewidth = 1.5,
fontsize = 16,
textwidth = 150))
# Important nodes get a double stroke.
def important_node(s, node, alpha=1.0):
style.style(None, _ctx).node(s, node, alpha)
r = node.r * 1.4
_ctx.nofill()
_ctx.oval(node.x-r, node.y-r, r*2, r*2)
# Marked nodes have an inner dot.
def marked_node(s, node, alpha=1.0):
style.style(None, _ctx).node(s, node, alpha)
r = node.r * 0.3
_ctx.fill(s.stroke)
_ctx.oval(node.x-r, node.y-r, r*2, r*2)
g.styles.important.node = important_node
g.styles.marked.node = marked_node
g.styles.depth = depth
# Styling guidelines. All nodes have the default style, except:
# 1) a node directly connected to the root gets the LIGHT style.
# 2) a node with more than 4 edges gets the DARK style.
# 3) a node with a weight of 0.75-1.0 gets the IMPORTANT style.
# 4) the graph.root node gets the ROOT style.
# 5) the node last clicked gets the BACK style.
g.styles.guide.append(style.LIGHT , lambda graph, node: graph.root in node.links)
g.styles.guide.append(style.DARK , lambda graph, node: len(node.links) > 4)
g.styles.guide.append(style.IMPORTANT , lambda graph, node: node.weight > 0.75)
g.styles.guide.append(style.ROOT , lambda graph, node: node == graph.root)
g.styles.guide.append(style.BACK , lambda graph, node: node == graph.events.clicked)
# An additional rule applies every node's weight to its radius.
def balance(graph, node):
node.r = node.r*0.75 + node.r*node.weight*0.75
g.styles.guide.append("balance", balance)
# An additional rule that keeps leaf nodes closely clustered.
def cluster(graph, node):
if len(node.links) == 1:
node.links.edge(node.links[0]).length *= 0.5
g.styles.guide.append("cluster", cluster)
g.styles.guide.order = [
        style.LIGHT, style.DARK, style.IMPORTANT, style.ROOT, style.BACK, "balance", "cluster"
]
return g
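# Typical usage sketch (assumes it runs inside NodeBox, which supplies the
# drawing context _ctx and the canvas size):
#   g = create(iterations=500, distance=1.2)
#   g.add_edge("root", "a"); g.add_edge("root", "b"); g.add_edge("a", "c")
#   g.solve()                     # or call g.update() once per animation frame
#   g.draw(weighted=True, directed=True)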
# 1.9.5.6
# Fixed circle_layout copy (number of orbits and starting angle weren't copied).
# 1.9.5.5
# graph.add_node and graph.add_edge call graph.new_node and graph.new_edge respectively.
# This should make subclassing nodes and edges a little easier.
# 1.9.5.4
# Fixed bug in spring_layout.tweak().
# Added directed=False parameter to dijkstra_shortest_path() and brandes_betweenness_centrality().
# 1.9.5.3
# Copies of nodes and edges correctly copy arbitrary attributes,
# e.g. edge.context, edge.relation and edge.author in a Perception graph.
# 1.9.5.2
# Reverted to old cluster.unique() (less fast but retains sort order).
# 1.9.5.1
# graph.draw() in push/pop.
# graph.node_id works like graph.node(id).
# Added graph.leaves property.
# Added graph.fringe() method.
# Added node.is_leaf property.
# Added node.can_reach().
# Added proximity.depth_first_search().
# graph.style.align supports RIGHT and CENTER.
# graph.layout.refresh() False rekindles the animation.
# import graph works outside NodeBox.
# 1.9.5
# Changed property names in spring_layout class.
# Added orbit property to circle_layout.
# Added force and repulsion properties to spring_layout.
# Increased default repulsion radius from 7 to 15.
# Added nurse behavior to the styleguide (edge length for leaves is 0.5).
# 1.9.4
# Edges now have a length property that controls individual attraction.
# 1.9.2.1
# proximity.eigenvector_centrality() yields warning
# instead of exception if it does not converge.
# Added heuristic parameter to proximity.dijkstra_shortest_path().
# Added layout.spring_layout.tweak()
# Added cluster.partition()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
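# Usage sketch (hypothetical deltas): choose one of two brightness adjustments
# at graph-execution time rather than graph-construction time:
#   image = apply_with_random_selector(
#       image,
#       lambda x, sel: tf.image.adjust_brightness(x, [0.1, -0.1][sel]),
#       num_cases=2)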
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
      must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
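# Usage sketch (mirrors how preprocess_for_train uses this below): when no
# annotated box is available, pass a whole-image bbox and keep the distorted
# box for summaries:
#   bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
#   cropped_image, distort_bbox = distorted_bounding_box_crop(image, bbox)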
def preprocess_for_train(image, height, width, bbox,
fast_mode=True,
scope=None):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
Additionally it would create image_summaries to display the different
transformations applied to the image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations (i.e.
bi-cubic resizing, random_hue or random_contrast).
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
tf.image_summary('image_with_bounding_boxes', image_with_box)
distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([None, None, 3])
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
tf.image_summary('images_with_distorted_bounding_box',
image_with_distorted_box)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
# We select only 1 case for fast_mode bilinear.
num_resize_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize_images(x, [height, width], method=method),
num_cases=num_resize_cases)
tf.image_summary('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors. There are 4 ways to do it.
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=4)
tf.image_summary('final_distorted_image',
tf.expand_dims(distorted_image, 0))
distorted_image = tf.sub(distorted_image, 0.5)
distorted_image = tf.mul(distorted_image, 2.0)
return distorted_image
def preprocess_for_eval(image, height, width,
central_fraction=0.875, scope=None):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
  If central_fraction is specified it would crop the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details)
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
if central_fraction:
image = tf.image.central_crop(image, central_fraction=central_fraction)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
image = tf.sub(image, 0.5)
image = tf.mul(image, 2.0)
return image
def preprocess_image(image, height, width,
is_training=False,
bbox=None,
fast_mode=True):
"""Pre-process one image for training or evaluation.
Args:
image: 3-D Tensor [height, width, channels] with the image.
height: integer, image expected height.
width: integer, image expected width.
is_training: Boolean. If true it would transform an image for train,
otherwise it would transform it for evaluation.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if is_training:
return preprocess_for_train(image, height, width, bbox, fast_mode)
else:
return preprocess_for_eval(image, height, width)
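# Minimal usage sketch (assumes `raw_image` is a decoded 3-D uint8 image tensor
# and 299x299 is the Inception-style input size):
#   train_image = preprocess_image(raw_image, 299, 299, is_training=True)
#   eval_image = preprocess_image(raw_image, 299, 299, is_training=False)
# Both return float32 tensors scaled to the range [-1, 1].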
|
|
import numpy as np
import pandas
def getPOCs():
return [p['cvcname'] for p in POC_dicts]
def getPOCInfo(critcol, critval, valcol):
values = list(filter(lambda poc: poc[critcol] == critval, POC_dicts))
if len(values) > 1:
raise ValueError('`getPOCInfo` found multiple records')
else:
return values[0][valcol]
def wqstd_template():
seasons = ['summer', 'autumn', 'winter', 'spring']
_template = pandas.DataFrame({
'parameter': [p['cvcname'] for p in POC_dicts],
'units': [p['conc_units']['plain'] for p in POC_dicts]
})
df = pandas.concat([_template.assign(season=s) for s in seasons])
df['influent median'] = np.nan
return df
LITERS_PER_CUBICMETER = 1.0e3
MICROGRAMS_PER_GRAM = 1.0e6
MILLIGRAMS_PER_GRAM = 1.0e3
POC_dicts = [
{
'cvcname': 'Aluminum (Al)',
'bmpname': 'Aluminum, Total',
'nsqdname': None,
'conc_units': {
'plain': 'ug/L',
'tex': r'\si[per-mode=symbol]{\micro\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MICROGRAMS_PER_GRAM,
'group': 'A',
'include': False,
}, {
'cvcname': 'Cadmium (Cd)',
'bmpname': 'Cadmium, Total',
'nsqdname': 'Cadmium',
'conc_units': {
'plain': 'ug/L',
'tex': r'\si[per-mode=symbol]{\micro\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MICROGRAMS_PER_GRAM,
'group': 'A',
'include': True,
}, {
'cvcname': 'Copper (Cu)',
'bmpname': 'Copper, Total',
'nsqdname': 'Copper',
'conc_units': {
'plain': 'ug/L',
'tex': r'\si[per-mode=symbol]{\micro\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MICROGRAMS_PER_GRAM,
'group': 'A',
'include': True,
}, {
'cvcname': 'Iron (Fe)',
'bmpname': 'Iron, Total',
'nsqdname': 'Iron as Fe',
'conc_units': {
'plain': 'ug/L',
'tex': r'\si[per-mode=symbol]{\micro\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MICROGRAMS_PER_GRAM,
'group': 'A',
'include': True,
}, {
'cvcname': 'Lead (Pb)',
'bmpname': 'Lead, Total',
'nsqdname': 'Lead',
'conc_units': {
'plain': 'ug/L',
'tex': r'\si[per-mode=symbol]{\micro\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MICROGRAMS_PER_GRAM,
'group': 'A',
'include': True,
}, {
'cvcname': 'Nickel (Ni)',
'bmpname': 'Nickel, Total',
'nsqdname': 'Nickel',
'conc_units': {
'plain': 'ug/L',
'tex': r'\si[per-mode=symbol]{\micro\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MICROGRAMS_PER_GRAM,
'group': 'A',
'include': True,
}, {
'cvcname': 'Zinc (Zn)',
'bmpname': 'Zinc, Total',
'nsqdname': 'Zinc',
'conc_units': {
'plain': 'ug/L',
'tex': r'\si[per-mode=symbol]{\micro\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MICROGRAMS_PER_GRAM,
'group': 'A',
'include': True,
}, {
'cvcname': 'Dissolved Chloride (Cl)',
'bmpname': 'Chloride, Dissolved',
'nsqdname': 'Chloride',
'conc_units': {
'plain': 'mg/L',
            'tex': r'\si[per-mode=symbol]{\milli\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MILLIGRAMS_PER_GRAM,
'group': 'B',
'include': False,
}, {
'cvcname': 'Nitrate (N)',
'bmpname': 'Nitrogen, Nitrate (NO3) as N',
'nsqdname': 'Nitrate as N',
'conc_units': {
'plain': 'mg/L',
'tex': r'\si[per-mode=symbol]{\milli\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MILLIGRAMS_PER_GRAM,
'group': 'B',
'include': True,
}, {
'cvcname': 'Nitrate + Nitrite',
'bmpname': 'Nitrogen, Nitrite (NO2) + Nitrate (NO3) as N',
'nsqdname': 'N02+NO3',
'conc_units': {
'plain': 'mg/L',
            'tex': r'\si[per-mode=symbol]{\milli\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MILLIGRAMS_PER_GRAM,
'group': 'B',
'include': True,
}, {
'cvcname': 'Total Kjeldahl Nitrogen (TKN)',
'bmpname': 'Kjeldahl nitrogen (TKN)',
'nsqdname': 'Total Kjeldahl Nitrogen',
'conc_units': {
'plain': 'mg/L',
'tex': r'\si[per-mode=symbol]{\milli\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MILLIGRAMS_PER_GRAM,
'group': 'B',
'include': False,
}, {
'cvcname': 'Orthophosphate (P)',
'bmpname': 'Phosphorus, orthophosphate as P',
'nsqdname': 'Phosphate Ortho as P',
'conc_units': {
'plain': 'mg/L',
'tex': r'\si[per-mode=symbol]{\milli\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MILLIGRAMS_PER_GRAM,
'group': 'B',
'include': False,
}, {
'cvcname': 'Total Phosphorus',
'bmpname': 'Phosphorus as P, Total',
'nsqdname': 'Phosphorous as P',
'conc_units': {
'plain': 'mg/L',
'tex': r'\si[per-mode=symbol]{\milli\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MILLIGRAMS_PER_GRAM,
'group': 'B',
'include': True,
}, {
'cvcname': 'Escherichia coli',
'bmpname': 'Escherichia coli',
'nsqdname': 'E. Coli',
'conc_units': {
'plain': 'CFU/100 mL',
'tex': r'CFU/100 mL'
},
'load_units': 'CFU',
'load_factor': 10 * LITERS_PER_CUBICMETER,
'group': 'B',
'include': False,
}, {
'cvcname': 'Total Oil & Grease',
'bmpname': None,
'nsqdname': 'Oil and Grease',
'conc_units': {
'plain': 'mg/L',
'tex': r'\si[per-mode=symbol]{\milli\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MILLIGRAMS_PER_GRAM,
'group': 'B',
'include': False,
}, {
'cvcname': 'Total Suspended Solids',
'bmpname': 'Total suspended solids',
'nsqdname': 'Total Suspended Solids',
'conc_units': {
'plain': 'mg/L',
'tex': r'\si[per-mode=symbol]{\milli\gram\per\liter}'
},
'load_units': 'g',
'load_factor': LITERS_PER_CUBICMETER / MILLIGRAMS_PER_GRAM,
'group': 'A',
'include': True,
},
]
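# Example usage (sketch): look up the plain-text concentration units for copper
# by its 'cvcname' key, and list every parameter name defined above:
#   getPOCInfo('cvcname', 'Copper (Cu)', 'conc_units')['plain']   # -> 'ug/L'
#   getPOCs()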
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2013 NTT MCL Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.conf import settings
from django.http import HttpResponse
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
from horizon import exceptions
from horizon import tabs
from horizon.utils.lazy_encoder import LazyTranslationEncoder
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.network_topology import forms
from openstack_dashboard.dashboards.project.network_topology.instances \
import tables as instances_tables
from openstack_dashboard.dashboards.project.network_topology.networks \
import tables as networks_tables
from openstack_dashboard.dashboards.project.network_topology.ports \
import tables as ports_tables
from openstack_dashboard.dashboards.project.network_topology.routers \
import tables as routers_tables
from openstack_dashboard.dashboards.project.network_topology.subnets \
import tables as subnets_tables
from openstack_dashboard.dashboards.project.network_topology \
import tabs as topology_tabs
from openstack_dashboard.dashboards.project.network_topology import utils
from openstack_dashboard.dashboards.project.instances.tables import \
STATUS_DISPLAY_CHOICES as instance_choices
from openstack_dashboard.dashboards.project.instances import\
views as i_views
from openstack_dashboard.dashboards.project.instances.workflows import\
create_instance as i_workflows
from openstack_dashboard.dashboards.project.networks.subnets import\
views as s_views
from openstack_dashboard.dashboards.project.networks.subnets import\
workflows as s_workflows
from openstack_dashboard.dashboards.project.networks.tables import \
DISPLAY_CHOICES as network_display_choices
from openstack_dashboard.dashboards.project.networks.tables import \
STATUS_DISPLAY_CHOICES as network_choices
from openstack_dashboard.dashboards.project.networks import\
views as n_views
from openstack_dashboard.dashboards.project.networks import\
workflows as n_workflows
from openstack_dashboard.dashboards.project.routers.ports.tables import \
DISPLAY_CHOICES as ports_choices
from openstack_dashboard.dashboards.project.routers.ports.tables import \
STATUS_DISPLAY_CHOICES as ports_status_choices
from openstack_dashboard.dashboards.project.routers.ports import\
views as p_views
from openstack_dashboard.dashboards.project.routers.tables import \
ADMIN_STATE_DISPLAY_CHOICES as routers_admin_choices
from openstack_dashboard.dashboards.project.routers.tables import \
STATUS_DISPLAY_CHOICES as routers_status_choices
from openstack_dashboard.dashboards.project.routers import\
views as r_views
from openstack_dashboard import policy
from openstack_dashboard.utils import settings as setting_utils
# List of known server statuses that won't connect to the console
console_invalid_status = {
'shutoff', 'suspended', 'resize', 'verify_resize',
'revert_resize', 'migrating', 'build', 'shelved',
'shelved_offloaded'}
class TranslationHelper(object):
"""Helper class to provide the translations.
This allows the network topology to access the translated strings
for various resources defined in other parts of the code.
"""
def __init__(self):
# turn translation tuples into dicts for easy access
self.instance = dict(instance_choices)
self.network = dict(network_choices)
self.network.update(dict(network_display_choices))
self.router = dict(routers_admin_choices)
self.router.update(dict(routers_status_choices))
self.port = dict(ports_choices)
self.port.update(dict(ports_status_choices))
# and turn all the keys into Uppercase for simple access
self.instance = {k.upper(): v for k, v in self.instance.items()}
self.network = {k.upper(): v for k, v in self.network.items()}
self.router = {k.upper(): v for k, v in self.router.items()}
self.port = {k.upper(): v for k, v in self.port.items()}
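    # Example (sketch): TranslationHelper().instance.get('ACTIVE') returns the
    # same translated display string the instances table uses for that status.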
class NTAddInterfaceView(p_views.AddInterfaceView):
success_url = "horizon:project:network_topology:index"
failure_url = "horizon:project:network_topology:index"
def get_success_url(self):
return reverse("horizon:project:network_topology:index")
def get_context_data(self, **kwargs):
context = super(NTAddInterfaceView, self).get_context_data(**kwargs)
context['form_url'] = 'horizon:project:network_topology:interface'
return context
class NTCreateRouterView(r_views.CreateView):
form_class = forms.NTCreateRouterForm
success_url = reverse_lazy("horizon:project:network_topology:index")
submit_url = reverse_lazy("horizon:project:network_topology:createrouter")
page_title = _("Create a Router")
class NTCreateNetwork(n_workflows.CreateNetwork):
def get_success_url(self):
return reverse("horizon:project:network_topology:index")
def get_failure_url(self):
return reverse("horizon:project:network_topology:index")
class NTCreateNetworkView(n_views.CreateView):
workflow_class = NTCreateNetwork
class NTLaunchInstance(i_workflows.LaunchInstance):
success_url = "horizon:project:network_topology:index"
class NTLaunchInstanceView(i_views.LaunchInstanceView):
workflow_class = NTLaunchInstance
class NTCreateSubnet(s_workflows.CreateSubnet):
def get_success_url(self):
return reverse("horizon:project:network_topology:index")
def get_failure_url(self):
return reverse("horizon:project:network_topology:index")
class NTCreateSubnetView(s_views.CreateView):
workflow_class = NTCreateSubnet
class InstanceView(i_views.IndexView):
table_class = instances_tables.InstancesTable
template_name = 'project/network_topology/iframe.html'
def get_data(self):
self._more = False
# Get instance by id, return a list of one instance
# If failed to retrieve the instance, return an empty list
try:
instance_id = self.request.GET.get("id", "")
instance = api.nova.server_get(self.request, instance_id)
return [instance]
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve the instance.'))
return []
class RouterView(r_views.IndexView):
table_class = routers_tables.RoutersTable
template_name = 'project/network_topology/iframe.html'
class NetworkView(n_views.IndexView):
table_class = networks_tables.NetworksTable
template_name = 'project/network_topology/iframe.html'
class RouterDetailView(r_views.DetailView):
table_classes = (ports_tables.PortsTable, )
template_name = 'project/network_topology/iframe.html'
def get_interfaces_data(self):
pass
class NetworkDetailView(n_views.DetailView):
table_classes = (subnets_tables.SubnetsTable, )
template_name = 'project/network_topology/iframe.html'
class NetworkTopologyView(tabs.TabView):
tab_group_class = topology_tabs.TopologyTabs
template_name = 'project/network_topology/index.html'
page_title = _("Network Topology")
def get_context_data(self, **kwargs):
context = super(NetworkTopologyView, self).get_context_data(**kwargs)
return utils.get_context(self.request, context)
class JSONView(View):
trans = TranslationHelper()
@property
def is_router_enabled(self):
return setting_utils.get_dict_config('OPENSTACK_NEUTRON_NETWORK',
'enable_router')
def add_resource_url(self, view, resources):
tenant_id = self.request.user.tenant_id
for resource in resources:
if (resource.get('tenant_id') and
tenant_id != resource.get('tenant_id')):
continue
resource['url'] = reverse(view, None, [str(resource['id'])])
def _check_router_external_port(self, ports, router_id, network_id):
for port in ports:
if (port['network_id'] == network_id and
port['device_id'] == router_id):
return True
return False
def _get_servers(self, request):
# Get nova data
try:
servers, more = api.nova.server_list(request)
except Exception:
servers = []
data = []
console_type = settings.CONSOLE_TYPE
        # The lowercase form of the keys will be used at the end of the
        # console URL.
for server in servers:
server_data = {'name': server.name,
'status': self.trans.instance[server.status],
'original_status': server.status,
'task': getattr(server, 'OS-EXT-STS:task_state'),
'id': server.id}
            # Avoid doing extra calls for the console if the server is in
            # an invalid status for a console connection
if server.status.lower() not in console_invalid_status:
if console_type:
server_data['console'] = 'auto_console'
data.append(server_data)
self.add_resource_url('horizon:project:instances:detail', data)
return data
def _get_networks(self, request):
        # Get neutron data.
        # If we don't specify tenant_id, all networks are shown as they would
        # be for an admin user, so we need to request the tenant's networks
        # explicitly. There is no need to specify tenant_id for subnets,
        # though: the subnets belonging to the public network are needed to
        # draw subnet information on the public network.
try:
# NOTE(amotoki):
            # To support auto allocated networks in the network topology view,
            # we need to handle the auto allocated network which hasn't been
            # created yet. The current network topology logic cannot handle
            # fake network IDs properly, so we temporarily exclude
            # pre-auto-allocated networks from the network topology view.
            # It would be nice if someone interested added support for it.
neutron_networks = api.neutron.network_list_for_tenant(
request,
request.user.tenant_id,
include_pre_auto_allocate=False)
except Exception:
neutron_networks = []
networks = []
for network in neutron_networks:
allow_delete_subnet = policy.check(
(("network", "delete_subnet"),),
request,
target={'network:tenant_id': getattr(network,
'tenant_id', None)}
)
obj = {'name': network.name_or_id,
'id': network.id,
'subnets': [{'id': subnet.id,
'cidr': subnet.cidr}
for subnet in network.subnets],
'status': self.trans.network[network.status],
'allow_delete_subnet': allow_delete_subnet,
'original_status': network.status,
'router:external': network['router:external']}
self.add_resource_url('horizon:project:networks:subnets:detail',
obj['subnets'])
networks.append(obj)
# Add public networks to the networks list
if self.is_router_enabled:
try:
neutron_public_networks = api.neutron.network_list(
request,
**{'router:external': True})
except Exception:
neutron_public_networks = []
my_network_ids = [net['id'] for net in networks]
for publicnet in neutron_public_networks:
if publicnet.id in my_network_ids:
continue
try:
subnets = [{'id': subnet.id,
'cidr': subnet.cidr}
for subnet in publicnet.subnets]
self.add_resource_url(
'horizon:project:networks:subnets:detail', subnets)
except Exception:
subnets = []
networks.append({
'name': publicnet.name_or_id,
'id': publicnet.id,
'subnets': subnets,
'status': self.trans.network[publicnet.status],
'original_status': publicnet.status,
'router:external': publicnet['router:external']})
self.add_resource_url('horizon:project:networks:detail',
networks)
return sorted(networks,
key=lambda x: x.get('router:external'),
reverse=True)
def _get_routers(self, request):
if not self.is_router_enabled:
return []
try:
neutron_routers = api.neutron.router_list(
request,
tenant_id=request.user.tenant_id)
except Exception:
neutron_routers = []
routers = [{'id': router.id,
'name': router.name_or_id,
'status': self.trans.router[router.status],
'original_status': router.status,
'external_gateway_info': router.external_gateway_info}
for router in neutron_routers]
self.add_resource_url('horizon:project:routers:detail', routers)
return routers
def _get_ports(self, request, networks):
try:
neutron_ports = api.neutron.port_list(request)
except Exception:
neutron_ports = []
        # We should filter out ports connected to non-tenant networks,
        # which the tenant has no visibility into.
tenant_network_ids = [network['id'] for network in networks]
ports = [{'id': port.id,
'network_id': port.network_id,
'device_id': port.device_id,
'fixed_ips': port.fixed_ips,
'device_owner': port.device_owner,
'status': self.trans.port[port.status],
'original_status': port.status}
for port in neutron_ports
if (port.device_owner != 'network:router_ha_interface' and
port.network_id in tenant_network_ids)]
self.add_resource_url('horizon:project:networks:ports:detail',
ports)
return ports
def _prepare_gateway_ports(self, routers, ports):
        # The user can't see ports on an external network, so we add a
        # fake port based on the router information.
for router in routers:
external_gateway_info = router.get('external_gateway_info')
if not external_gateway_info:
continue
external_network = external_gateway_info.get(
'network_id')
if not external_network:
continue
if self._check_router_external_port(ports,
router['id'],
external_network):
continue
fake_port = {'id': 'gateway%s' % external_network,
'network_id': external_network,
'device_id': router['id'],
'fixed_ips': []}
ports.append(fake_port)
def get(self, request, *args, **kwargs):
networks = self._get_networks(request)
data = {'servers': self._get_servers(request),
'networks': networks,
'ports': self._get_ports(request, networks),
'routers': self._get_routers(request)}
self._prepare_gateway_ports(data['routers'], data['ports'])
json_string = json.dumps(data, cls=LazyTranslationEncoder,
ensure_ascii=False)
return HttpResponse(json_string, content_type='text/json')
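    # A rough sketch of the JSON payload shape produced by get() (field values
    # below are placeholders, not taken from a real deployment):
    #
    #     {"servers":  [{"name": "...", "status": "...", "id": "...", "url": "..."}],
    #      "networks": [{"name": "...", "id": "...", "subnets": [...],
    #                    "router:external": false, "url": "..."}],
    #      "ports":    [{"id": "...", "network_id": "...", "device_id": "...",
    #                    "fixed_ips": [...], "url": "..."}],
    #      "routers":  [{"id": "...", "name": "...",
    #                    "external_gateway_info": {...}, "url": "..."}]}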
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.rebol
~~~~~~~~~~~~~~~~~~~~~
Lexers for the REBOL and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Generic, Whitespace
__all__ = ['RebolLexer', 'RedLexer']
class RebolLexer(RegexLexer):
"""
A `REBOL <http://www.rebol.com/>`_ lexer.
.. versionadded:: 1.1
"""
name = 'REBOL'
aliases = ['rebol']
filenames = ['*.r', '*.r3', '*.reb']
mimetypes = ['text/x-rebol']
flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(
r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|'
r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|'
r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|'
r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|'
r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|'
r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|'
r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|'
r'while|compress|decompress|secure|open|close|read|read-io|'
r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|'
r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|'
r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|'
r'browse|launch|stats|get-modes|set-modes|to-local-file|'
r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|'
r'hide|draw|show|size-text|textinfo|offset-to-caret|'
r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|'
r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|'
r'dsa-make-key|dsa-generate-key|dsa-make-signature|'
r'dsa-verify-signature|rsa-make-key|rsa-generate-key|'
r'rsa-encrypt)$', word):
yield match.start(), Name.Builtin, word
elif re.match(
r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|'
r'minimum|maximum|negate|complement|absolute|random|head|tail|'
r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|'
r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|'
r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|'
r'copy)$', word):
yield match.start(), Name.Function, word
elif re.match(
r'(error|source|input|license|help|install|echo|Usage|with|func|'
r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|'
r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|'
r'remold|charset|array|replace|move|extract|forskip|forall|alter|'
r'first+|also|take|for|forever|dispatch|attempt|what-dir|'
r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|'
r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|'
r'build-tag|process-source|build-markup|decode-cgi|read-cgi|'
r'write-user|save-user|set-user-name|protect-system|parse-xml|'
r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|'
r'scroll-para|get-face|alert|set-face|uninstall|unfocus|'
r'request-dir|center-face|do-events|net-error|decode-url|'
r'parse-header|parse-header-date|parse-email-addrs|import-email|'
r'send|build-attach-body|resend|show-popup|hide-popup|open-events|'
r'find-key-face|do-face|viewtop|confine|find-window|'
r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|'
r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|'
r'read-thru|load-thru|do-thru|launch-thru|load-image|'
r'request-download|do-face-alt|set-font|set-para|get-style|'
r'set-style|make-face|stylize|choose|hilight-text|hilight-all|'
r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|'
r'resize-face|load-stock|load-stock-block|notify|request|flash|'
r'request-color|request-pass|request-text|request-list|'
r'request-date|request-file|dbug|editor|link-relative-path|'
r'emailer|parse-error)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(
r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|'
r'return|exit|break)$', word):
yield match.start(), Name.Exception, word
elif re.match('REBOL$', word):
yield match.start(), Generic.Heading, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
        elif re.match(r'(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
                      word):
            yield match.start(), Operator, word
        elif re.match(r".*\?$", word):
            yield match.start(), Keyword, word
        elif re.match(r".*\!$", word):
            yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
else:
yield match.start(), Name.Variable, word
tokens = {
'root': [
(r'[^R]+', Comment),
(r'REBOL\s+\[', Generic.Strong, 'script'),
(r'R', Comment)
],
'script': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
(r'#\{[0-9a-f]*\}', Number.Hex),
(r'2#\{', Number.Hex, 'bin2'),
(r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
(r'"', String, 'string'),
(r'\{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(^{")\s\[\]]+', Name.Decorator),
(r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?'
r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+X\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]()]', Generic.Strong),
(r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url
(r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url
(r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email
(r'comment\s"', Comment, 'commentString1'),
(r'comment\s\{', Comment, 'commentString2'),
(r'comment\s\[', Comment, 'commentBlock'),
(r'comment\s[^(\s{"\[]+', Comment),
(r'/[^(^{")\s/[\]]*', Name.Attribute),
(r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'<[\w:.-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'([^(^{")\s]+)', Text),
],
'string': [
(r'[^(^")]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(^{})]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
'stringFile': [
(r'[^(^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[(|)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([01]\s*){8}', Number.Hex),
(r'\}', Number.Hex, '#pop'),
],
'commentString1': [
(r'[^(^")]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(^{})]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'\{', Comment, '#push'),
(r'\}', Comment, '#pop'),
],
'commentBlock': [
(r'\[', Comment, '#push'),
(r'\]', Comment, '#pop'),
(r'"', Comment, "commentString1"),
(r'\{', Comment, "commentString2"),
(r'[^(\[\]"{)]+', Comment),
],
}
def analyse_text(text):
"""
        Check if the code contains a REBOL header; if so, it is probably not R code.
        """
        if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
            # The code starts with a REBOL header
            return 1.0
        elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
            # The code contains a REBOL header but also some text before it
            return 0.5
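# A minimal usage sketch for the lexer above (illustrative; assumes Pygments is
# installed and the sample REBOL source string is made up):
#
#     from pygments import highlight
#     from pygments.formatters import NullFormatter
#
#     code = 'REBOL [Title: "demo"]  print "hello"'
#     print(highlight(code, RebolLexer(), NullFormatter()))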
class RedLexer(RegexLexer):
"""
A `Red-language <http://www.red-lang.org/>`_ lexer.
.. versionadded:: 2.0
"""
name = 'Red'
aliases = ['red', 'red/system']
filenames = ['*.red', '*.reds']
mimetypes = ['text/x-red', 'text/x-red-system']
flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|'
r'foreach|forall|func|function|does|has|switch|'
r'case|reduce|compose|get|set|print|prin|equal\?|'
r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|'
r'greater-or-equal\?|same\?|not|type\?|stats|'
r'bind|union|replace|charset|routine)$', word):
yield match.start(), Name.Builtin, word
elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|'
r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|'
r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|'
r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|'
r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|'
r'update|write)$', word):
yield match.start(), Name.Function, word
elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|'
r'none|crlf|dot|null-byte)$', word):
yield match.start(), Name.Builtin.Pseudo, word
elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|'
r'#switch|#default|#get-definition)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|'
r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|'
r'quote|forever)$', word):
yield match.start(), Name.Exception, word
elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|'
r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|'
r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|'
r'any-struct\?|none\?|word\?|any-series\?)$', word):
yield match.start(), Keyword, word
elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
        elif re.match(r'(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|'
                      r'<<<|>>>|<<|>>|<|>%)$', word):
            yield match.start(), Operator, word
        elif re.match(r".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
elif re.match(":.*", word):
yield match.start(), Generic.Subheading, word # get-word
else:
yield match.start(), Name.Variable, word
tokens = {
'root': [
(r'[^R]+', Comment),
(r'Red/System\s+\[', Generic.Strong, 'script'),
(r'Red\s+\[', Generic.Strong, 'script'),
(r'R', Comment)
],
'script': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
(r'#\{[0-9a-f\s]*\}', Number.Hex),
(r'2#\{', Number.Hex, 'bin2'),
(r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
(r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))',
bygroups(Number.Hex, Name.Variable, Whitespace)),
(r'"', String, 'string'),
(r'\{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(^{")\s\[\]]+', Name.Decorator),
(r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?'
r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+X\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]()]', Generic.Strong),
(r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url
(r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url
(r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email
(r'comment\s"', Comment, 'commentString1'),
(r'comment\s\{', Comment, 'commentString2'),
(r'comment\s\[', Comment, 'commentBlock'),
(r'comment\s[^(\s{"\[]+', Comment),
(r'/[^(^{^")\s/[\]]*', Name.Attribute),
(r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'<[\w:.-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'([^(^{")\s]+)', Text),
],
'string': [
(r'[^(^")]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(^{})]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
'stringFile': [
(r'[^(^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[(|)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([01]\s*){8}', Number.Hex),
(r'\}', Number.Hex, '#pop'),
],
'commentString1': [
(r'[^(^")]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(^{})]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'\{', Comment, '#push'),
(r'\}', Comment, '#pop'),
],
'commentBlock': [
(r'\[', Comment, '#push'),
(r'\]', Comment, '#pop'),
(r'"', Comment, "commentString1"),
(r'\{', Comment, "commentString2"),
(r'[^(\[\]"{)]+', Comment),
],
}
|
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Routines to query information from QIP """
import heapq
import re
from ipaddress import IPv4Address, IPv4Network
from six import text_type
from aquilon.exceptions_ import PartialError
from aquilon.aqdb.model import (NetworkEnvironment, Network, RouterAddress,
ARecord, AddressAssignment, Building, Bunker,
NetworkCompartment)
from aquilon.worker.dbwrappers.dns import delete_dns_record
from aquilon.worker.dbwrappers.network import fix_foreign_links
from aquilon.worker.templates import Plenary, PlenaryCollection
from aquilon.config import Config
from sqlalchemy.orm import subqueryload
from sqlalchemy.sql import update, and_, or_
# Add a wrapper around heapq.heappop because handling None is simpler than
# adding try/except blocks everywhere
def heap_pop(heap):
try:
return heapq.heappop(heap)
except IndexError:
return None
class QIPInfo(object):
__slots__ = ("name", "address", "location", "network_type", "side",
"routers", "compartment")
def __init__(self, name, address, location, network_type,
side, routers, compartment):
self.name = name
self.address = address
self.location = location
self.network_type = network_type
self.side = side
self.routers = routers
self.compartment = compartment
def __lt__(self, other):
# The refresh algorithm depends on QIPInfo objects being ordered by the
# network IP address
return self.address.network_address < other.address.network_address
class QIPRefresh(object):
def __init__(self, session, logger, dbbuilding, dryrun, incremental):
self.session = session
self.logger = logger
self.config = Config()
self.building = dbbuilding
self.dryrun = dryrun
self.incremental = incremental
# Synchronize the internal environment only
self.net_env = NetworkEnvironment.get_unique_or_default(session)
self.errors = []
# Cache building and bunker information. Load all buildings even if
# we're interested in only one, so we can verify subnetdata.txt
self.buildings = {item.name: item for item in session.query(Building)}
self.bunkers = {item.name: item for item in session.query(Bunker)}
self.compartments = {item.name: item
for item in session.query(NetworkCompartment)}
# Used to limit the number of warnings
self.unknown_syslocs = set()
self.unknown_compartments = set()
self.ignored_compartments = set()
self.missing_compartments = False
        # Load existing networks. We have to load all of them, otherwise we
        # won't be able to fix networks with the wrong location.
q = session.query(Network)
q = q.filter_by(network_environment=self.net_env)
q = q.options(subqueryload("routers"),
subqueryload('network_compartment'))
self.aqnetworks = {item.network_address: item for item in q}
# Save how many networks we had initially
self.networks_before = len(self.aqnetworks)
# Plenaries that need to be updated
self.plenaries = PlenaryCollection(logger=logger)
# Network compartment restrictions
self.precreated_compartments_only = self.config.getboolean("network_refresh",
"precreated_compartments_only")
        ignore_regex = self.config.get("network_refresh",
                                       "ignore_network_compartments_regex")
        self.ignore_net_compartments = re.compile(ignore_regex) \
            if ignore_regex else None
def error(self, msg):
self.logger.error(msg)
self.errors.append(msg)
def commit_if_needed(self):
try:
if self.dryrun or self.incremental:
self.session.flush()
if self.incremental:
self.plenaries.write()
self.session.commit()
except Exception as err: # pragma: no cover
self.error(str(err))
self.session.rollback()
finally:
if self.incremental:
self.plenaries = PlenaryCollection(logger=self.logger)
def parse_line(self, line):
"""
Parses a line from subnetdata.txt
Returns None if the line did not contain network information. Otherwise
it returns the attributes that are interesting to us as a dict.
"""
# Format of subnetdata.txt:
# - Fields are separated by tabs
# - A field is a key/value pair, separated by a space
# - The value of the DefaultRouters field is a comma-separated list of
# IP addresses
# - The value of the UDF field is a list of "<key>=<value>" pairs,
# separated by ';'
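        #
        # An illustrative (entirely made-up) sample line, showing only the
        # expected shape; <TAB> marks the tab separators:
        #
        #   SubnetId 1234<TAB>SubnetAddress 10.1.2.0<TAB>SubnetMask 255.255.255.0<TAB>
        #   SubnetName example-net<TAB>DefaultRouters 10.1.2.1,10.1.2.2<TAB>
        #   UDF LOCATION=ab.ny.na;TYPE=unknown;SIDE=a;COMPARTMENT=interior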
qipinfo = {}
name = None
location = None
network_type = "unknown"
side = "a"
routers = []
compartment = None
fields = line.split("\t")
for field in fields:
# The value may contain embedded spaces
(key, value) = field.split(" ", 1)
# Some fields contain structured data
if key == "UDF":
udf = {}
for item in value.split(";"):
(udfkey, udfvalue) = item.split("=", 1)
udf[udfkey] = udfvalue
value = udf
qipinfo[key] = value
# Sanity check
if "SubnetId" not in qipinfo or "SubnetAddress" not in qipinfo or \
"SubnetMask" not in qipinfo:
self.logger.info("WARNING: Line contains no network: %s" % line)
return None
if "SubnetName" in qipinfo:
name = qipinfo["SubnetName"].strip().lower()
if not name:
name = qipinfo["SubnetAddress"]
# Parse the network address/netmask
address = IPv4Network(u"%s/%s" % (qipinfo["SubnetAddress"],
qipinfo["SubnetMask"]))
# Parse the list of routers
if "DefaultRouters" in qipinfo:
for addr in qipinfo["DefaultRouters"].split(","):
routers.append(IPv4Address(text_type(addr)))
if self.precreated_compartments_only and ("UDF" not in qipinfo or "COMPARTMENT" not in qipinfo["UDF"]):
if not self.missing_compartments:
self.logger.client_info("Missing network compartment info and "
"precreated_compartments_only set to "
"True, skipping these networks.")
self.missing_compartments = True
return None
# Extract MS-specific information from the UDF field
if "UDF" in qipinfo:
if "LOCATION" in qipinfo["UDF"]:
# Values in QIP sometimes contain spaces and mixed case
syslocstr = qipinfo["UDF"]["LOCATION"].strip().lower()
sysloc = syslocstr.split('.')
if len(sysloc) >= 3:
if sysloc[-3] in self.buildings:
location = self.buildings[sysloc[-3]]
else:
# Do not make "refresh network --all" fail if a new
# building does not exist in AQDB yet. Warn once for
# every unknown sysloc we encounter.
if syslocstr in self.unknown_syslocs:
return None
self.unknown_syslocs.add(syslocstr)
self.logger.client_info("Unknown building code in sysloc "
"%s, ignoring" % syslocstr)
return None
else:
raise ValueError("Failed to parse LOCATION")
if "BUCKET" in qipinfo["UDF"] and location:
bucket = qipinfo["UDF"]["BUCKET"].strip().lower()
bunker = bucket + "." + location.name
if bunker in self.bunkers:
location = self.bunkers[bunker]
if "TYPE" in qipinfo["UDF"]:
network_type = qipinfo["UDF"]["TYPE"].strip().lower()
if "SIDE" in qipinfo["UDF"]:
side = qipinfo["UDF"]["SIDE"].strip().lower()
if "COMPARTMENT" in qipinfo["UDF"]:
compartment_name = qipinfo["UDF"]["COMPARTMENT"].strip().lower()
if self.ignore_net_compartments and \
self.ignore_net_compartments.match(compartment_name):
if compartment_name not in self.ignored_compartments:
self.logger.client_info("Network compartment {} matches 'ignore_network_compartments_regex', "
"skipping these networks.".format(compartment_name))
self.ignored_compartments.add(compartment_name)
return None
if compartment_name in self.compartments:
compartment = self.compartments[compartment_name]
elif self.precreated_compartments_only:
if compartment_name not in self.unknown_compartments:
self.logger.client_info("Unknown network compartment {} and "
"precreated_compartments_only set to "
"True, skipping these networks.".format(compartment_name))
self.unknown_compartments.add(compartment_name)
return None
elif compartment_name not in self.unknown_compartments:
self.logger.client_info("Unknown compartment %s,"
" ignoring" % compartment_name)
self.unknown_compartments.add(compartment_name)
# FIXME: How to handle networks with no location? dsdb maps them to
# sysloc "xx.ny.na", so mimic that for now
if not location:
if "xx" in self.buildings:
location = self.buildings["xx"]
else:
# FIXME: the testsuite does not have the "xx" building
return None
return QIPInfo(name=name, address=address, location=location,
network_type=network_type, side=side,
routers=routers, compartment=compartment)
def update_network(self, dbnetwork, qipinfo):
""" Update the network parameters except the netmask """
# We don't want to add the plenary to self.plenaries if we aren't going
# to change anything
plenary = Plenary.get_plenary(dbnetwork)
updated = False
if dbnetwork.name != qipinfo.name:
self.logger.client_info("Setting network {0!s} name to {1}"
.format(dbnetwork, qipinfo.name))
dbnetwork.name = qipinfo.name
if dbnetwork.network_type != qipinfo.network_type:
self.logger.client_info("Setting network {0!s} type to {1}"
.format(dbnetwork, qipinfo.network_type))
dbnetwork.network_type = qipinfo.network_type
if dbnetwork.location != qipinfo.location:
self.logger.client_info("Setting network {0!s} location to {1:l}"
.format(dbnetwork, qipinfo.location))
dbnetwork.location = qipinfo.location
if dbnetwork.side != qipinfo.side:
self.logger.client_info("Setting network {0!s} side to {1}"
.format(dbnetwork, qipinfo.side))
dbnetwork.side = qipinfo.side
if dbnetwork.network_compartment != qipinfo.compartment:
self.logger.client_info("Setting network {0!s} compartment to {1!s}"
.format(dbnetwork, qipinfo.compartment))
dbnetwork.network_compartment = qipinfo.compartment
if dbnetwork in self.session.dirty:
updated = True
old_rtrs = set(dbnetwork.router_ips)
new_rtrs = set(qipinfo.routers)
del_routers = []
for router in dbnetwork.routers:
if router.ip in old_rtrs - new_rtrs:
del_routers.append(router)
for router in del_routers:
self.logger.client_info("Removing router {0:s} from "
"{1:l}".format(router.ip, dbnetwork))
for dns_rec in router.dns_records:
if dns_rec.is_unused:
delete_dns_record(dns_rec)
dbnetwork.routers.remove(router)
updated = True
for ip in new_rtrs - old_rtrs:
self.add_router(dbnetwork, ip)
updated = True
if updated:
self.plenaries.append(plenary)
# TODO: add support for updating router locations
return dbnetwork.netmask == qipinfo.address.netmask
def add_network(self, qipinfo):
dbnetwork = Network(name=qipinfo.name, network=qipinfo.address,
network_type=qipinfo.network_type,
side=qipinfo.side, location=qipinfo.location,
network_environment=self.net_env,
network_compartment=qipinfo.compartment)
self.session.add(dbnetwork)
self.logger.client_info("Adding network {0!s}".format(dbnetwork))
for ip in qipinfo.routers:
self.add_router(dbnetwork, ip)
self.plenaries.add(dbnetwork)
self.session.flush()
return dbnetwork
def del_network(self, dbnetwork):
        # Check if the network is in use and return a readable error message
        # if it is
in_use = False
for addr in dbnetwork.assignments:
self.error("{0} cannot be deleted because {1} is still assigned to "
"{2:l}.".format(dbnetwork, addr.ip, addr.interface))
in_use = True
for dns_rec in dbnetwork.dns_records:
if hasattr(dns_rec, "ip") and dns_rec.ip in dbnetwork.router_ips:
continue
self.error("{0} cannot be deleted because DNS record {1:a} still "
"exists.".format(dbnetwork, dns_rec))
in_use = True
if not in_use:
for router in dbnetwork.routers:
self.logger.client_info("Removing router {0:s} from "
"{1:l}".format(router.ip, dbnetwork))
for dns_rec in router.dns_records:
delete_dns_record(dns_rec)
dbnetwork.routers = []
self.logger.client_info("Deleting network {0!s}".format(dbnetwork))
self.session.delete(dbnetwork)
def add_router(self, dbnetwork, ip):
dbnetwork.routers.append(RouterAddress(ip=ip))
self.logger.client_info("Adding router {0:s} to "
"{1:l}".format(ip, dbnetwork))
def check_split_network(self, dbnetwork):
# If a network was split and some of the subnets were deleted, then
# some IP address allocations may be left in limbo. Force these cases to
# generate an SQL error by violating the NOT NULL constraint
self.session.execute(
update(AddressAssignment.__table__,
values={'network_id': None})
.where(and_(AddressAssignment.network_id == dbnetwork.id,
or_(AddressAssignment.ip < dbnetwork.network_address,
AddressAssignment.ip > dbnetwork.broadcast_address)))
)
self.session.execute(
update(ARecord.__table__,
values={'network_id': None})
.where(and_(ARecord.network_id == dbnetwork.id,
or_(ARecord.ip < dbnetwork.network_address,
ARecord.ip > dbnetwork.broadcast_address)))
)
def refresh(self, filehandle):
linecnt = 0
qipnetworks = {}
for line in filehandle:
linecnt += 1
line = line.rstrip("\n")
try:
qipinfo = self.parse_line(line)
if not qipinfo:
continue
if self.building and qipinfo.location.building != self.building:
continue
qipnetworks[qipinfo.address.network_address] = qipinfo
except ValueError as err:
self.error("%s; skipping line %d: %s" % (err, linecnt, line))
# Check/update network attributes that do not affect other objects. Do
# this in a single transaction, even in incremental mode
        ips = list(self.aqnetworks.keys())
for ip in ips:
            if ip not in qipnetworks:
                # "Forget" networks not inside the requested building, to
                # prevent them from being deleted
if self.building and self.aqnetworks[ip].location.building != self.building:
del self.aqnetworks[ip]
continue
if self.update_network(self.aqnetworks[ip], qipnetworks[ip]):
# If the netmask did not change, then we're done with this
# network
del self.aqnetworks[ip]
del qipnetworks[ip]
self.commit_if_needed()
# What is left after this point is additions, deletions, splits and
# merges
aqnets = list(self.aqnetworks.values())
heapq.heapify(aqnets)
qipnets = list(qipnetworks.values())
heapq.heapify(qipnets)
aqnet = heap_pop(aqnets)
qipinfo = heap_pop(qipnets)
while aqnet or qipinfo:
if aqnet:
self.plenaries.add(aqnet)
# We have 3 cases regarding aqnet/qipinfo:
# - One contains the other: this is a split or a merge
# - aqnet.network_address < qipinfo.address.network_address (or
# there is no qipinfo): the network was deleted from QIP
# - qipinfo.address.network_address < aqnet.network_address (or
# there is no aqnet): a new network was added to QIP
if aqnet and qipinfo and (aqnet.network_address in qipinfo.address or
qipinfo.address.network_address in aqnet.network):
# This is a split or a merge. The trick here is to perform
# multiple network additions/deletions inside the same
# transaction even in incremental mode, to maintain relational
# integrity
startip = min(aqnet.network_address, qipinfo.address.network_address)
prefixlen = min(aqnet.cidr, qipinfo.address.prefixlen)
supernet = IPv4Network(u"%s/%s" % (startip, prefixlen))
# We may modify aqnet.network below, so save the original value
orig_net = aqnet.network
# Always deleting & possibly recreating aqnet would make things
# simpler, but we can't do that due to the unique constraint on
# the IP address and the non-null foreign key constraints in
# other tables. So we need a flag to remember if we want to keep
# the original object or not
if aqnet.network_address == qipinfo.address.network_address:
self.logger.client_info("Setting network {0!s} prefix "
"length to {1}"
.format(aqnet,
qipinfo.address.prefixlen))
aqnet.cidr = qipinfo.address.prefixlen
keep_aqnet = True
else:
# This can happen if the network was split, and then the
# first subnet was deleted
keep_aqnet = False
# Here we rely heavily on network sizes being a power of two, so
# supernet is either equal to aqnet or to qipinfo - partial
# overlap is not possible
if orig_net == supernet:
# Split:
# AQ: ******** (one big network)
# QIP: --**++++ (smaller networks, some may be missing)
if keep_aqnet:
# The first subnet was handled above by setting
# aqnet.cidr
qipinfo = heap_pop(qipnets)
else:
# The first subnet was deleted
pass
while qipinfo and qipinfo.address.network_address in orig_net:
newnet = self.add_network(qipinfo)
# Redirect addresses from the split network to the new
# subnet
fix_foreign_links(self.session, aqnet, newnet)
qipinfo = heap_pop(qipnets)
if keep_aqnet:
self.check_split_network(aqnet)
else:
self.del_network(aqnet)
aqnet = heap_pop(aqnets)
else:
# Merge:
# AQ: --++**** (smaller networks, some may be missing)
# QIP: ******** (one big network)
if keep_aqnet:
# The first subnet was handled above by setting
# aqnet.cidr
newnet = aqnet
aqnet = heap_pop(aqnets)
else:
# The first subnet was missing from AQDB before
newnet = self.add_network(qipinfo)
while aqnet and aqnet.network_address in newnet.network:
# Redirect addresses from the subnet to the merged
# network
fix_foreign_links(self.session, aqnet, newnet)
self.del_network(aqnet)
aqnet = heap_pop(aqnets)
qipinfo = heap_pop(qipnets)
elif aqnet and (not qipinfo or aqnet.network_address <
qipinfo.address.network_address):
# Network is deleted
self.del_network(aqnet)
aqnet = heap_pop(aqnets)
else:
# New network
self.add_network(qipinfo)
qipinfo = heap_pop(qipnets)
self.commit_if_needed()
self.session.flush()
self.plenaries.flatten()
self.plenaries.write()
if self.errors:
if self.incremental:
msg = ""
else:
msg = "No changes applied because of errors."
raise PartialError(success=[], failed=self.errors, success_msg=msg)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubnetsOperations:
"""SubnetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
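    # A minimal calling sketch (illustrative; assumes an already-constructed
    # asynchronous NetworkManagementClient named `client`, with this operation
    # group exposed as `client.subnets`):
    #
    #     poller = await client.subnets.begin_delete("my-rg", "my-vnet", "my-subnet")
    #     await poller.result()   # returns once the subnet has been deleted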
async def get(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Subnet":
"""Gets the specified subnet by virtual network and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subnet, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.Subnet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
subnet_parameters: "_models.Subnet",
**kwargs: Any
) -> "_models.Subnet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(subnet_parameters, 'Subnet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Subnet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
subnet_parameters: "_models.Subnet",
**kwargs: Any
) -> AsyncLROPoller["_models.Subnet"]:
"""Creates or updates a subnet in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param subnet_parameters: Parameters supplied to the create or update subnet operation.
:type subnet_parameters: ~azure.mgmt.network.v2019_09_01.models.Subnet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Subnet or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_09_01.models.Subnet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
subnet_parameters=subnet_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
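    # A minimal calling sketch (illustrative; reuses the hypothetical `client`
    # from the begin_delete sketch above, and assumes only an address prefix is
    # being set on the subnet):
    #
    #     subnet_params = _models.Subnet(address_prefix="10.0.1.0/24")
    #     poller = await client.subnets.begin_create_or_update(
    #         "my-rg", "my-vnet", "my-subnet", subnet_params)
    #     subnet = await poller.result()   # the created or updated Subnet model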
async def _prepare_network_policies_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
prepare_network_policies_request_parameters: "_models.PrepareNetworkPoliciesRequest",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._prepare_network_policies_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(prepare_network_policies_request_parameters, 'PrepareNetworkPoliciesRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_prepare_network_policies_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/PrepareNetworkPolicies'} # type: ignore
async def begin_prepare_network_policies(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
prepare_network_policies_request_parameters: "_models.PrepareNetworkPoliciesRequest",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Prepares a subnet by applying network intent policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param prepare_network_policies_request_parameters: Parameters supplied to prepare subnet by
applying network intent policies.
:type prepare_network_policies_request_parameters: ~azure.mgmt.network.v2019_09_01.models.PrepareNetworkPoliciesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._prepare_network_policies_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
prepare_network_policies_request_parameters=prepare_network_policies_request_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_prepare_network_policies.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/PrepareNetworkPolicies'} # type: ignore
async def _unprepare_network_policies_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
unprepare_network_policies_request_parameters: "_models.UnprepareNetworkPoliciesRequest",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._unprepare_network_policies_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(unprepare_network_policies_request_parameters, 'UnprepareNetworkPoliciesRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_unprepare_network_policies_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/UnprepareNetworkPolicies'} # type: ignore
async def begin_unprepare_network_policies(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
unprepare_network_policies_request_parameters: "_models.UnprepareNetworkPoliciesRequest",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Unprepares a subnet by removing network intent policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param unprepare_network_policies_request_parameters: Parameters supplied to unprepare subnet
to remove network intent policies.
:type unprepare_network_policies_request_parameters: ~azure.mgmt.network.v2019_09_01.models.UnprepareNetworkPoliciesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._unprepare_network_policies_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
unprepare_network_policies_request_parameters=unprepare_network_policies_request_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_unprepare_network_policies.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/UnprepareNetworkPolicies'} # type: ignore
def list(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SubnetListResult"]:
"""Gets all subnets in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubnetListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.SubnetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubnetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SubnetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'} # type: ignore
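# A hedged usage sketch (illustrative only, not part of the generated code).
# It assumes these methods are exposed as the async `subnets` operations group
# on a NetworkManagementClient; the client wiring is defined elsewhere, and
# prepare_request_parameters stands for a _models.PrepareNetworkPoliciesRequest.
#
#   poller = await client.subnets.begin_prepare_network_policies(
#       "my-rg", "my-vnet", "my-subnet", prepare_request_parameters)
#   await poller.result()   # resolves to None once the long-running operation finishes
#
#   async for subnet in client.subnets.list("my-rg", "my-vnet"):
#       print(subnet.name)  # AsyncItemPaged transparently follows next_link paging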
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import sys
import unittest
from shutil import copyfile, copytree
from tempfile import TemporaryDirectory
import jmespath
import pytest
from parameterized import parameterized
from tests.helm_template_generator import render_chart
class PodTemplateFileTest(unittest.TestCase):
@classmethod
@pytest.fixture(autouse=True, scope="class")
def isolate_chart(cls):
with TemporaryDirectory() as tmp_dir:
cls.temp_chart_dir = tmp_dir + "/chart"
copytree(sys.path[0], cls.temp_chart_dir)
copyfile(
cls.temp_chart_dir + "/files/pod-template-file.kubernetes-helm-yaml",
cls.temp_chart_dir + "/templates/pod-template-file.yaml",
)
yield
def test_should_work(self):
docs = render_chart(
values={},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert jmespath.search("spec.containers[0].image", docs[0]) is not None
assert "base" == jmespath.search("spec.containers[0].name", docs[0])
def test_should_add_an_init_container_if_git_sync_is_true(self):
docs = render_chart(
values={
"images": {
"gitSync": {
"repository": "test-registry/test-repo",
"tag": "test-tag",
"pullPolicy": "Always",
}
},
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"wait": 66,
"maxFailures": 70,
"subPath": "path1/path2",
"rev": "HEAD",
"depth": 1,
"repo": "https://github.com/apache/airflow.git",
"branch": "test-branch",
"sshKeySecret": None,
"credentialsSecret": None,
"knownHosts": None,
}
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert {
"name": "git-sync-test",
"securityContext": {"runAsUser": 65533},
"image": "test-registry/test-repo:test-tag",
"imagePullPolicy": "Always",
"env": [
{"name": "GIT_SYNC_REV", "value": "HEAD"},
{"name": "GIT_SYNC_BRANCH", "value": "test-branch"},
{"name": "GIT_SYNC_REPO", "value": "https://github.com/apache/airflow.git"},
{"name": "GIT_SYNC_DEPTH", "value": "1"},
{"name": "GIT_SYNC_ROOT", "value": "/git"},
{"name": "GIT_SYNC_DEST", "value": "repo"},
{"name": "GIT_SYNC_ADD_USER", "value": "true"},
{"name": "GIT_SYNC_WAIT", "value": "66"},
{"name": "GIT_SYNC_MAX_SYNC_FAILURES", "value": "70"},
{"name": "GIT_SYNC_ONE_TIME", "value": "true"},
],
"volumeMounts": [{"mountPath": "/git", "name": "dags"}],
"resources": {},
} == jmespath.search("spec.initContainers[0]", docs[0])
def test_should_not_add_init_container_if_dag_persistence_is_true(self):
docs = render_chart(
values={
"dags": {
"persistence": {"enabled": True},
"gitSync": {"enabled": True},
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert jmespath.search("spec.initContainers", docs[0]) is None
@parameterized.expand(
[
({"gitSync": {"enabled": True}}, True),
({"persistence": {"enabled": True}}, False),
(
{
"gitSync": {"enabled": True},
"persistence": {"enabled": True},
},
True,
),
]
)
def test_dags_mount(self, dag_values, expected_read_only):
docs = render_chart(
values={"dags": dag_values},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"mountPath": "/opt/airflow/dags",
"name": "dags",
"readOnly": expected_read_only,
} in jmespath.search("spec.containers[0].volumeMounts", docs[0])
def test_validate_if_ssh_params_are_added(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"sshKeySecret": "ssh-secret",
"knownHosts": None,
"branch": "test-branch",
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {"name": "GIT_SYNC_SSH", "value": "true"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {"name": "GIT_KNOWN_HOSTS", "value": "false"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {
"name": "git-sync-ssh-key",
"mountPath": "/etc/git-secret/ssh",
"subPath": "gitSshKey",
"readOnly": True,
} in jmespath.search("spec.initContainers[0].volumeMounts", docs[0])
assert {
"name": "git-sync-ssh-key",
"secret": {"secretName": "ssh-secret", "defaultMode": 288},
} in jmespath.search("spec.volumes", docs[0])
def test_validate_if_ssh_known_hosts_are_added(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"sshKeySecret": "ssh-secret",
"knownHosts": "github.com ssh-rsa AAAABdummy",
"branch": "test-branch",
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "GIT_KNOWN_HOSTS", "value": "true"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {
"name": "GIT_SSH_KNOWN_HOSTS_FILE",
"value": "/etc/git-secret/known_hosts",
} in jmespath.search("spec.initContainers[0].env", docs[0])
assert {
"name": "config",
"mountPath": "/etc/git-secret/known_hosts",
"subPath": "known_hosts",
"readOnly": True,
} in jmespath.search("spec.initContainers[0].volumeMounts", docs[0])
def test_should_set_username_and_pass_env_variables(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"credentialsSecret": "user-pass-secret",
"sshKeySecret": None,
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "GIT_SYNC_USERNAME",
"valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_USERNAME"}},
} in jmespath.search("spec.initContainers[0].env", docs[0])
assert {
"name": "GIT_SYNC_PASSWORD",
"valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_PASSWORD"}},
} in jmespath.search("spec.initContainers[0].env", docs[0])
def test_should_set_the_dags_volume_claim_correctly_when_using_an_existing_claim(self):
docs = render_chart(
values={"dags": {"persistence": {"enabled": True, "existingClaim": "test-claim"}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "dags", "persistentVolumeClaim": {"claimName": "test-claim"}} in jmespath.search(
"spec.volumes", docs[0]
)
def test_should_use_empty_dir_for_gitsync_without_persistence(self):
docs = render_chart(
values={"dags": {"gitSync": {"enabled": True}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "dags", "emptyDir": {}} in jmespath.search("spec.volumes", docs[0])
@parameterized.expand(
[
({"enabled": False}, {"emptyDir": {}}),
({"enabled": True}, {"persistentVolumeClaim": {"claimName": "RELEASE-NAME-logs"}}),
(
{"enabled": True, "existingClaim": "test-claim"},
{"persistentVolumeClaim": {"claimName": "test-claim"}},
),
]
)
def test_logs_persistence_changes_volume(self, log_persistence_values, expected):
docs = render_chart(
values={"logs": {"persistence": log_persistence_values}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "logs", **expected} in jmespath.search("spec.volumes", docs[0])
def test_should_set_a_custom_image_in_pod_template(self):
docs = render_chart(
values={"images": {"pod_template": {"repository": "dummy_image", "tag": "latest"}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert "dummy_image:latest" == jmespath.search("spec.containers[0].image", docs[0])
assert "base" == jmespath.search("spec.containers[0].name", docs[0])
def test_mount_airflow_cfg(self):
docs = render_chart(
values={},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert {'configMap': {'name': 'RELEASE-NAME-airflow-config'}, 'name': 'config'} == jmespath.search(
"spec.volumes[1]", docs[0]
)
assert {
'name': 'config',
'mountPath': '/opt/airflow/airflow.cfg',
'subPath': 'airflow.cfg',
'readOnly': True,
} == jmespath.search("spec.containers[0].volumeMounts[1]", docs[0])
def test_should_create_valid_affinity_and_node_selector(self):
docs = render_chart(
values={
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert "foo" == jmespath.search(
"spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
assert "ssd" == jmespath.search(
"spec.nodeSelector.diskType",
docs[0],
)
assert "dynamic-pods" == jmespath.search(
"spec.tolerations[0].key",
docs[0],
)
def test_should_add_fsgroup_to_the_pod_template(self):
docs = render_chart(
values={"gid": 5000},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
self.assertEqual(5000, jmespath.search("spec.securityContext.fsGroup", docs[0]))
def test_should_create_valid_volume_mount_and_volume(self):
docs = render_chart(
values={
"workers": {
"extraVolumes": [{"name": "test-volume", "emptyDir": {}}],
"extraVolumeMounts": [{"name": "test-volume", "mountPath": "/opt/test"}],
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert "test-volume" == jmespath.search(
"spec.volumes[2].name",
docs[0],
)
assert "test-volume" == jmespath.search(
"spec.containers[0].volumeMounts[2].name",
docs[0],
)
def test_should_add_env_for_gitsync(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"env": [{"name": "FOO", "value": "bar"}],
}
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "FOO", "value": "bar"} in jmespath.search("spec.initContainers[0].env", docs[0])
def test_no_airflow_local_settings_by_default(self):
docs = render_chart(show_only=["templates/pod-template-file.yaml"], chart_dir=self.temp_chart_dir)
volume_mounts = jmespath.search("spec.containers[0].volumeMounts", docs[0])
assert "airflow_local_settings.py" not in str(volume_mounts)
def test_airflow_local_settings(self):
docs = render_chart(
values={"airflowLocalSettings": "# Well hello!"},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "config",
"mountPath": "/opt/airflow/config/airflow_local_settings.py",
"subPath": "airflow_local_settings.py",
"readOnly": True,
} in jmespath.search("spec.containers[0].volumeMounts", docs[0])
def test_airflow_pod_annotations(self):
docs = render_chart(
values={"airflowPodAnnotations": {"my_annotation": "annotated!"}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
annotations = jmespath.search("metadata.annotations", docs[0])
assert "my_annotation" in annotations
assert "annotated!" in annotations["my_annotation"]
def test_should_add_extra_init_containers(self):
docs = render_chart(
values={
"workers": {
"extraInitContainers": [
{"name": "test-init-container", "image": "test-registry/test-repo:test-tag"}
],
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "test-init-container",
"image": "test-registry/test-repo:test-tag",
} == jmespath.search("spec.initContainers[-1]", docs[0])
def test_should_add_pod_labels(self):
docs = render_chart(
values={"labels": {"label1": "value1", "label2": "value2"}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"label1": "value1",
"label2": "value2",
"release": "RELEASE-NAME",
"component": "worker",
"tier": "airflow",
} == jmespath.search("metadata.labels", docs[0])
|
|
#!/usr/bin/env python3
"""Test the general MG solver with a variable coeffcient Helmholtz
problem. This ensures we didn't screw up the base functionality here.
Here we solve::
alpha phi + div . ( beta grad phi ) = f
with::
alpha = 1.0
beta = 2.0 + cos(2.0*pi*x)*cos(2.0*pi*y)
f = (-16.0*pi**2*cos(2*pi*x)*cos(2*pi*y) - 16.0*pi**2 + 1.0)*sin(2*pi*x)*sin(2*pi*y)
This has the exact solution::
phi = sin(2.0*pi*x)*sin(2.0*pi*y)
on [0,1] x [0,1]
We use Dirichlet BCs on phi. For beta, we do not have to impose the
same BCs, since that may represent a different physical quantity.
Here we take beta to have Neumann BCs. (Dirichlet BCs for beta will
force it to 0 on the boundary, which is not correct here)
"""
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
import compare
import mesh.boundary as bnd
import mesh.patch as patch
import multigrid.general_MG as MG
from util import msg, io
# the analytic solution
def true(x, y):
return np.sin(2.0*np.pi*x)*np.sin(2.0*np.pi*y)
# the coefficients
def alpha(x, y):
return np.ones_like(x)
def beta(x, y):
return 2.0 + np.cos(2.0*np.pi*x)*np.cos(2.0*np.pi*y)
def gamma_x(x, y):
return np.zeros_like(x)
def gamma_y(x, y):
return np.zeros_like(x)
# the righthand side
def f(x, y):
return (-16.0*np.pi**2*np.cos(2*np.pi*x)*np.cos(2*np.pi*y) -
16.0*np.pi**2 + 1.0)*np.sin(2*np.pi*x)*np.sin(2*np.pi*y)
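# A hedged sanity check, not part of the original test and never called:
# it verifies symbolically (using sympy, assumed to be available) that the
# manufactured solution above satisfies  alpha*phi + div(beta grad phi) = f.
def _verify_manufactured_solution():
    import sympy as sp
    x, y = sp.symbols("x y")
    phi = sp.sin(2*sp.pi*x)*sp.sin(2*sp.pi*y)
    beta_sym = 2 + sp.cos(2*sp.pi*x)*sp.cos(2*sp.pi*y)
    lhs = phi + sp.diff(beta_sym*sp.diff(phi, x), x) + sp.diff(beta_sym*sp.diff(phi, y), y)
    rhs = (-16*sp.pi**2*sp.cos(2*sp.pi*x)*sp.cos(2*sp.pi*y)
           - 16*sp.pi**2 + 1)*sp.sin(2*sp.pi*x)*sp.sin(2*sp.pi*y)
    assert sp.simplify(lhs - rhs) == 0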
def test_general_poisson_dirichlet(N, store_bench=False, comp_bench=False,
make_plot=False, verbose=1, rtol=1.e-12):
"""
test the general MG solver. The return value
here is the error compared to the exact solution, UNLESS
comp_bench=True, in which case the return value is the
error compared to the stored benchmark
"""
# test the multigrid solver
nx = N
ny = nx
# create the coefficient variable
g = patch.Grid2d(nx, ny, ng=1)
d = patch.CellCenterData2d(g)
bc_c = bnd.BC(xlb="neumann", xrb="neumann",
ylb="neumann", yrb="neumann")
d.register_var("alpha", bc_c)
d.register_var("beta", bc_c)
d.register_var("gamma_x", bc_c)
d.register_var("gamma_y", bc_c)
d.create()
a = d.get_var("alpha")
a[:, :] = alpha(g.x2d, g.y2d)
b = d.get_var("beta")
b[:, :] = beta(g.x2d, g.y2d)
gx = d.get_var("gamma_x")
gx[:, :] = gamma_x(g.x2d, g.y2d)
gy = d.get_var("gamma_y")
gy[:, :] = gamma_y(g.x2d, g.y2d)
# create the multigrid object
a = MG.GeneralMG2d(nx, ny,
xl_BC_type="dirichlet", yl_BC_type="dirichlet",
xr_BC_type="dirichlet", yr_BC_type="dirichlet",
coeffs=d,
verbose=verbose, vis=0, true_function=true)
# initialize the solution to 0
a.init_zeros()
# initialize the RHS using the function f
rhs = f(a.x2d, a.y2d)
a.init_RHS(rhs)
# solve to a relative tolerance of 1.e-11
a.solve(rtol=1.e-11)
# alternatively, we can just use smoothing by uncommenting the following
# a.smooth(a.nlevels-1,50000)
# get the solution
v = a.get_solution()
# compute the error from the analytic solution
b = true(a.x2d, a.y2d)
e = v - b
enorm = e.norm()
print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
(enorm, a.relative_error, a.num_cycles))
# plot the solution
if make_plot:
plt.clf()
plt.figure(figsize=(10.0, 4.0), dpi=100, facecolor='w')
plt.subplot(121)
img1 = plt.imshow(np.transpose(v.v()),
interpolation="nearest", origin="lower",
extent=[a.xmin, a.xmax, a.ymin, a.ymax])
plt.xlabel("x")
plt.ylabel("y")
plt.title("nx = {}".format(nx))
plt.colorbar(img1)
plt.subplot(122)
img2 = plt.imshow(np.transpose(e.v()),
interpolation="nearest", origin="lower",
extent=[a.xmin, a.xmax, a.ymin, a.ymax])
plt.xlabel("x")
plt.ylabel("y")
plt.title("error")
plt.colorbar(img2)
plt.tight_layout()
plt.savefig("mg_general_alphabeta_only_test.png")
# store the output for later comparison
bench = "mg_general_poisson_dirichlet"
bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"
my_data = a.get_solution_object()
if store_bench:
my_data.write("{}/{}".format(bench_dir, bench))
# do we do a comparison?
if comp_bench:
compare_file = "{}/{}".format(bench_dir, bench)
msg.warning("comparing to: %s " % (compare_file))
bench = io.read(compare_file)
result = compare.compare(my_data, bench, rtol)
if result == 0:
msg.success("results match benchmark to within relative tolerance of {}\n".format(rtol))
else:
msg.warning("ERROR: " + compare.errors[result] + "\n")
return result
# normal return -- error wrt true solution
return enorm
if __name__ == "__main__":
N = [16, 32, 64]
err = []
plot = False
store = False
do_compare = False
for nx in N:
if nx == max(N):
plot = True
enorm = test_general_poisson_dirichlet(nx, make_plot=plot,
store_bench=store, comp_bench=do_compare)
err.append(enorm)
# plot the convergence
N = np.array(N, dtype=np.float64)
err = np.array(err)
plt.clf()
plt.loglog(N, err, "x", color="r")
plt.loglog(N, err[0]*(N[0]/N)**2, "--", color="k")
plt.xlabel("N")
plt.ylabel("error")
fig = plt.gcf()
fig.set_size_inches(7.0, 6.0)
plt.tight_layout()
plt.savefig("mg_general_alphabeta_only_converge.png")
|
|
import collections
import re
from datetime import datetime, timedelta, timezone
from numbers import Number, Real, Integral
from math import isnan, floor
from pickle import PickleError
import numpy as np
from Orange.data import _variable
from Orange.util import Registry, color_to_hex, hex_to_color, Reprable
__all__ = ["Unknown", "MISSING_VALUES", "make_variable", "is_discrete_values",
"Value", "Variable", "ContinuousVariable", "DiscreteVariable",
"StringVariable", "TimeVariable"]
# For storing unknowns
Unknown = ValueUnknown = float("nan")
# For checking for unknowns
MISSING_VALUES = {np.nan, "?", "nan", ".", "", "NA", "~", None}
DISCRETE_MAX_VALUES = 3 # == 2 + nan
def make_variable(cls, compute_value, *args):
if compute_value is not None:
return cls(*args, compute_value=compute_value)
return cls.make(*args)
def is_discrete_values(values):
"""
Return the set of unique values if `values` is an iterable of discrete
values, `False` if the values are non-discrete, or `None` if indeterminate.
Note
----
Assumes consistent type of items of `values`.
"""
if not len(values):
return None
# If the first few values are, or can be converted to, floats,
# the type is numeric
try:
isinstance(next(iter(values)), Number) or \
[float(v) for _, v in zip(range(min(3, len(values))), values)]
except ValueError:
is_numeric = False
max_values = int(round(len(values)**.7))
else:
is_numeric = True
max_values = DISCRETE_MAX_VALUES
# If more than max values => not discrete
unique = set()
for i in values:
unique.add(i)
if len(unique) > max_values:
return False
# Strip NaN from unique
unique = {i for i in unique
if (not i in MISSING_VALUES and
not (isinstance(i, Number) and np.isnan(i)))}
# All NaNs => indeterminate
if not unique:
return None
# Strings with |values| < max_unique
if not is_numeric:
return unique
# Handle numbers
try:
unique_float = set(map(float, unique))
except ValueError:
# Converting all the values to floats resulted in an error.
# Since the values have enough unique values, they are probably
# string values and discrete.
return unique
# If only values are {0, 1} or {1, 2} (or a subset of those sets) => discrete
return (not (unique_float - {0, 1}) or
not (unique_float - {1, 2})) and unique
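# A hedged usage sketch (illustrative only, never called): expected behaviour
# of is_discrete_values for a few typical inputs.
def _is_discrete_values_example():
    assert is_discrete_values(["a", "b", "a", "c"]) == {"a", "b", "c"}
    assert is_discrete_values([0, 1, 0, 1]) == {0, 1}
    assert is_discrete_values([0.1, 2.3, 5.7, 8.2]) is False   # too many numeric uniques
    assert is_discrete_values([]) is None                      # indeterminate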
class Value(float):
"""
The class representing a value. The class is not used to store values but
only to return them in contexts in which we want the value to be accompanied
with the descriptor, for instance to print the symbolic value of discrete
variables.
The class is derived from `float`, with an additional attribute `variable`
which holds the descriptor of type :obj:`Orange.data.Variable`. If the
value is continuous or discrete, it is stored as a float. Other types of
values, like strings, are stored in the attribute `value`.
The class overloads the methods for printing out the value:
`variable.repr_val` and `variable.str_val` are used to get a suitable
representation of the value.
Equivalence operator is overloaded as follows:
- unknown values are equal; if one value is unknown and the other is not,
they are different;
- if the value is compared with the string, the value is converted to a
string using `variable.str_val` and the two strings are compared
- if the value is stored in attribute `value`, it is compared with the
given other value
- otherwise, the inherited comparison operator for `float` is called.
Finally, value defines a hash, so values can be put in sets and appear as
keys in dictionaries.
.. attribute:: variable (:obj:`Orange.data.Variable`)
Descriptor; used for printing out and for comparing with strings
.. attribute:: value
Value; the value can be of arbitrary type and is used only for variables
that are neither discrete nor continuous. If `value` is `None`, the
derived `float` value is used.
"""
__slots__ = "variable", "_value"
def __new__(cls, variable, value=Unknown):
"""
Construct a new instance of Value with the given descriptor and value.
If the argument `value` can be converted to float, it is stored as
`float` and the attribute `value` is set to `None`. Otherwise, the
inherited float is set to `Unknown` and the value is held by the
attribute `value`.
:param variable: descriptor
:type variable: Orange.data.Variable
:param value: value
"""
if variable.is_primitive():
self = super().__new__(cls, value)
self.variable = variable
self._value = None
else:
isunknown = value == variable.Unknown
self = super().__new__(
cls, np.nan if isunknown else np.finfo(float).min)
self.variable = variable
self._value = value
return self
def __init__(self, _, __=Unknown):
pass
def __repr__(self):
return "Value('%s', %s)" % (self.variable.name,
self.variable.repr_val(self))
def __str__(self):
return self.variable.str_val(self)
def __eq__(self, other):
if isinstance(self, Real) and isnan(self):
return (isinstance(other, Real) and isnan(other)
or other in self.variable.unknown_str)
if isinstance(other, str):
return self.variable.str_val(self) == other
if isinstance(other, Value):
return self.value == other.value
return super().__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if self.variable.is_primitive():
if isinstance(other, str):
return super().__lt__(self.variable.to_val(other))
else:
return super().__lt__(other)
else:
if isinstance(other, str):
return self.value < other
else:
return self.value < other.value
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __ge__(self, other):
return not self.__lt__(other)
def __contains__(self, other):
if (self._value is not None
and isinstance(self._value, str)
and isinstance(other, str)):
return other in self._value
raise TypeError("invalid operation on Value()")
def __hash__(self):
if self._value is None:
return super().__hash__()
else:
return hash((super().__hash__(), self._value))
@property
def value(self):
if self.variable.is_discrete:
return Unknown if isnan(self) else self.variable.values[int(self)]
if self.variable.is_string:
return self._value
return float(self)
def __getnewargs__(self):
return self.variable, float(self)
def __getstate__(self):
return dict(value=getattr(self, '_value', None))
def __setstate__(self, state):
self._value = state.get('value', None)
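# A hedged usage sketch (comments only, since the variable classes are defined
# further down in this module; behaviour as described in the docstring above):
#   color = DiscreteVariable("color", values=["red", "green"])
#   v = Value(color, 1)
#   float(v)     -> 1.0       (the index, stored as the inherited float)
#   v.value      -> "green"   (symbolic value looked up via the descriptor)
#   v == "green" -> True      (string comparison goes through str_val)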
class VariableMeta(Registry):
def __new__(cls, name, bases, attrs):
obj = super().__new__(cls, name, bases, attrs)
if not hasattr(obj, '_all_vars') or obj._all_vars is Variable._all_vars:
obj._all_vars = {}
return obj
class Variable(Reprable, metaclass=VariableMeta):
"""
The base class for variable descriptors contains the variable's
name and some basic properties.
.. attribute:: name
The name of the variable.
.. attribute:: unknown_str
A set of values that represent unknowns in conversion from textual
formats. Default is `{"?", ".", "", "NA", "~", None}`.
.. attribute:: compute_value
A function for computing the variable's value when converting from
another domain which does not contain this variable. The base class
defines a static method `compute_value`, which returns `Unknown`.
Non-primitive variables must redefine it to return `None`.
.. attribute:: source_variable
An optional descriptor of the source variable - if any - from which
this variable is derived and computed via :obj:`compute_value`.
.. attribute:: attributes
A dictionary with user-defined attributes of the variable
.. attribute:: master
The variable that this variable is a copy of. If a copy is made from a
copy, the copy has a reference to the original master. If the variable
is not a copy, it is its own master.
"""
Unknown = ValueUnknown
def __init__(self, name="", compute_value=None):
"""
Construct a variable descriptor.
"""
self.name = name
self._compute_value = compute_value
self.unknown_str = MISSING_VALUES
self.source_variable = None
self.attributes = {}
self.master = self
if name and compute_value is None:
if isinstance(self._all_vars, collections.defaultdict):
self._all_vars[name].append(self)
else:
self._all_vars[name] = self
self._colors = None
def make_proxy(self):
"""
Copy the variable and set the master to `self.master` or to `self`.
:return: copy of self
:rtype: Variable
"""
var = self.__class__()
var.__dict__.update(self.__dict__)
var.master = self.master
return var
def __eq__(self, other):
"""Two variables are equivalent if the originate from the same master"""
return hasattr(other, "master") and self.master is other.master
def __hash__(self):
return super().__hash__()
@classmethod
def make(cls, name):
"""
Return an existing continuous variable with the given name, or
construct and return a new one.
"""
if not name:
raise ValueError("Variables without names cannot be stored or made")
return cls._all_vars.get(name) or cls(name)
@classmethod
def _clear_cache(cls):
"""
Clear the list of variables for reuse by :obj:`make`.
"""
cls._all_vars.clear()
@staticmethod
def _clear_all_caches():
"""
Clears list of stored variables for all subclasses
"""
for cls in Variable.registry.values():
cls._clear_cache()
@classmethod
def is_primitive(cls):
"""
`True` if the variable's values are stored as floats.
Non-primitive variables can appear in the data only as meta attributes.
"""
return issubclass(cls, (DiscreteVariable, ContinuousVariable))
@property
def is_discrete(self):
return isinstance(self, DiscreteVariable)
@property
def is_continuous(self):
return isinstance(self, ContinuousVariable)
@property
def is_string(self):
return isinstance(self, StringVariable)
def repr_val(self, val):
"""
Return a textual representation of variable's value `val`. Argument
`val` must be a float (for primitive variables) or an arbitrary
Python object (for non-primitives).
Derived classes must overload the function.
"""
raise RuntimeError("variable descriptors must overload repr_val()")
str_val = repr_val
def to_val(self, s):
"""
Convert the given argument to a value of the variable. The
argument can be a string, a number or `None`. For primitive variables,
the base class provides a method that returns
:obj:`~Orange.data.Unknown` if `s` is found in
:obj:`~Orange.data.Variable.unknown_str`, and raises an exception
otherwise. For non-primitive variables it returns the argument itself.
Derived classes of primitive variables must overload the function.
:param s: value, represented as a number, string or `None`
:type s: str, float or None
:rtype: float or object
"""
if not self.is_primitive():
return s
if s in self.unknown_str:
return Unknown
raise RuntimeError(
"primitive variable descriptors must overload to_val()")
def val_from_str_add(self, s):
"""
Convert the given string to a value of the variable. The method
is similar to :obj:`to_val` except that it only accepts strings and
that it adds new values to the variable's domain where applicable.
The base class method calls `to_val`.
:param s: symbolic representation of the value
:type s: str
:rtype: float or object
"""
return self.to_val(s)
def __str__(self):
return self.name
@property
def compute_value(self):
return self._compute_value
def __reduce__(self):
if not self.name:
raise PickleError("Variables without names cannot be pickled")
return make_variable, (self.__class__, self._compute_value, self.name), self.__dict__
def copy(self, compute_value):
var = type(self)(self.name, compute_value=compute_value)
var.attributes = dict(self.attributes)
return var
class ContinuousVariable(Variable):
"""
Descriptor for continuous variables.
.. attribute:: number_of_decimals
The number of decimals when the value is printed out (default: 3).
.. attribute:: adjust_decimals
A flag regulating whether the `number_of_decimals` is being adjusted
by :obj:`to_val`.
The value of `number_of_decimals` is set to 3 and `adjust_decimals`
is set to 2. When :obj:`val_from_str_add` is called for the first
time with a string as an argument, `number_of_decimals` is set to the
number of decimals in the string and `adjust_decimals` is set to 1.
In the subsequent calls of `to_val`, the number of decimals is
increased if the string argument has a larger number of decimals.
If the `number_of_decimals` is set manually, `adjust_decimals` is
set to 0 to prevent changes by `to_val`.
"""
TYPE_HEADERS = ('continuous', 'c')
def __init__(self, name="", number_of_decimals=None, compute_value=None):
"""
Construct a new continuous variable. The number of decimals is set to
three, but adjusted at the first call of :obj:`to_val`.
"""
super().__init__(name, compute_value)
if number_of_decimals is None:
self.number_of_decimals = 3
self.adjust_decimals = 2
else:
self.number_of_decimals = number_of_decimals
@property
def number_of_decimals(self):
return self._number_of_decimals
@property
def colors(self):
if self._colors is None:
try:
col1, col2, black = self.attributes["colors"]
self._colors = (hex_to_color(col1), hex_to_color(col2), black)
except (KeyError, ValueError):
# Stored colors were not available or invalid, use defaults
self._colors = ((0, 0, 255), (255, 255, 0), False)
return self._colors
@colors.setter
def colors(self, value):
col1, col2, black = self._colors = value
self.attributes["colors"] = \
[color_to_hex(col1), color_to_hex(col2), black]
# noinspection PyAttributeOutsideInit
@number_of_decimals.setter
def number_of_decimals(self, x):
self._number_of_decimals = x
self.adjust_decimals = 0
self._out_format = "%.{}f".format(self.number_of_decimals)
def to_val(self, s):
"""
Convert a value, given as an instance of an arbitrary type, to a float.
"""
if s in self.unknown_str:
return Unknown
return float(s)
def val_from_str_add(self, s):
"""
Convert a value from a string and adjust the number of decimals if
`adjust_decimals` is non-zero.
"""
return _variable.val_from_str_add_cont(self, s)
def repr_val(self, val):
"""
Return the value as a string with the prescribed number of decimals.
"""
if isnan(val):
return "?"
return self._out_format % val
str_val = repr_val
def copy(self, compute_value=None):
var = type(self)(self.name, self.number_of_decimals, compute_value)
var.attributes = dict(self.attributes)
return var
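# A hedged usage sketch (illustrative only, never called):
def _continuous_variable_example():
    age = ContinuousVariable("age_example", number_of_decimals=1)
    assert age.repr_val(42.0) == "42.0"       # formatted with one decimal
    assert isnan(age.to_val("?"))             # unknown strings map to Unknown (nan)
    assert ContinuousVariable("x_example").repr_val(3.14159) == "3.142"  # default: 3 decimals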
class DiscreteVariable(Variable):
"""
Descriptor for symbolic, discrete variables. Values of discrete variables
are stored as floats; the numbers correspond to indices in the list of
values.
.. attribute:: values
A list of variable's values.
.. attribute:: ordered
Some algorithms (and, in particular, visualizations) may
sometimes reorder the values of the variable, e.g. alphabetically.
This flag hints that the given order of values is "natural"
(e.g. "small", "middle", "large") and should not be changed.
.. attribute:: base_value
The index of the base value, or -1 if there is none. The base value is
used in some methods like, for instance, when creating dummy variables
for regression.
"""
TYPE_HEADERS = ('discrete', 'd')
_all_vars = collections.defaultdict(list)
presorted_values = []
def __init__(self, name="", values=(), ordered=False, base_value=-1, compute_value=None):
""" Construct a discrete variable descriptor with the given values. """
super().__init__(name, compute_value)
self.ordered = ordered
self.values = list(values)
self.base_value = base_value
@property
def colors(self):
if self._colors is None:
from Orange.widgets.utils.colorpalette import ColorPaletteGenerator
self._colors = ColorPaletteGenerator.palette(self)
colors = self.attributes.get('colors')
if colors:
self._colors[:len(colors)] = [hex_to_color(color) for color in colors]
self._colors.flags.writeable = False
return self._colors
@colors.setter
def colors(self, value):
self._colors = value
self._colors.flags.writeable = False
self.attributes["colors"] = [color_to_hex(col) for col in value]
def set_color(self, i, color):
self.colors = self.colors
self._colors.flags.writeable = True
self._colors[i, :] = color
self._colors.flags.writeable = False
self.attributes["colors"][i] = color_to_hex(color)
def to_val(self, s):
"""
Convert the given argument to a value of the variable (`float`).
If the argument is numeric, its value is returned without checking
whether it is integer and within bounds. `Unknown` is returned if the
argument is one of the representations for unknown values. Otherwise,
the argument must be a string and the method returns its index in
:obj:`values`.
:param s: values, represented as a number, string or `None`
:rtype: float
"""
if s is None:
return ValueUnknown
if isinstance(s, Integral):
return s
if isinstance(s, Real):
return s if isnan(s) else floor(s + 0.25)
if s in self.unknown_str:
return ValueUnknown
if not isinstance(s, str):
raise TypeError('Cannot convert {} to value of "{}"'.format(
type(s).__name__, self.name))
return self.values.index(s)
def add_value(self, s):
""" Add a value `s` to the list of values.
"""
self.values.append(s)
self._colors = None
def val_from_str_add(self, s):
"""
Similar to :obj:`to_val`, except that it accepts only strings and that
it adds the value to the list if it does not exist yet.
:param s: symbolic representation of the value
:type s: str
:rtype: float
"""
s = str(s) if s is not None else s
try:
return ValueUnknown if s in self.unknown_str \
else self.values.index(s)
except ValueError:
self.add_value(s)
return len(self.values) - 1
def repr_val(self, val):
"""
Return a textual representation of the value (`self.values[int(val)]`)
or "?" if the value is unknown.
:param val: value
:type val: float (should be whole number)
:rtype: str
"""
if isnan(val):
return "?"
return '{}'.format(self.values[int(val)])
str_val = repr_val
def __reduce__(self):
if not self.name:
raise PickleError("Variables without names cannot be pickled")
return make_variable, (self.__class__, self._compute_value, self.name,
self.values, self.ordered, self.base_value), \
self.__dict__
@classmethod
def make(cls, name, values=(), ordered=False, base_value=-1):
"""
Return a variable with the given name and other properties. The method
first looks for a compatible existing variable: the existing
variable must have the same name and both variables must have either
ordered or unordered values. If values are ordered, the order must be
compatible: all common values must have the same order. If values are
unordered, the existing variable must have at least one common value
with the new one, except when either of the two lists of values is empty.
If a compatible variable is found, it is returned, with missing values
appended to the end of the list. If there is no explicit order, the
values are ordered using :obj:`ordered_values`. Otherwise, it
constructs and returns a new variable descriptor.
:param name: the name of the variable
:type name: str
:param values: symbolic values for the variable
:type values: list
:param ordered: tells whether the order of values is fixed
:type ordered: bool
:param base_value: the index of the base value, or -1 if there is none
:type base_value: int
:returns: an existing compatible variable or a newly constructed one
"""
if not name:
raise ValueError("Variables without names cannot be stored or made")
var = cls._find_compatible(
name, values, ordered, base_value)
if var:
return var
if not ordered:
base_value_rep = base_value != -1 and values[base_value]
values = cls.ordered_values(values)
if base_value != -1:
base_value = values.index(base_value_rep)
return cls(name, values, ordered, base_value)
@classmethod
def _find_compatible(cls, name, values=(), ordered=False, base_value=-1):
"""
Return a compatible existing variable, or `None` if there is none.
See :obj:`make` for details; this function differs by returning `None`
instead of constructing a new descriptor. (Method :obj:`make` calls
this function.)
:param name: the name of the variable
:type name: str
:param values: symbolic values for the variable
:type values: list
:param ordered: tells whether the order of values is fixed
:type ordered: bool
:param base_value: the index of the base value, or -1 if there is none
:type base_value: int
:returns: an existing compatible variable or `None`
"""
base_rep = base_value != -1 and values[base_value]
existing = cls._all_vars.get(name)
if existing is None:
return None
if not ordered:
values = cls.ordered_values(values)
for var in existing:
if (var.ordered != ordered or
var.base_value != -1
and var.values[var.base_value] != base_rep):
continue
if not values:
break # we have the variable - any existing values are OK
if not set(var.values) & set(values):
continue # empty intersection of values; not compatible
if ordered:
i = 0
for val in var.values:
if values[i] == val:
i += 1
if i == len(values):
break # we have all the values
else: # we have some remaining values: check them, add them
if set(values[i:]) & set(var.values):
continue # next var in existing
for val in values[i:]:
var.add_value(val)
break # we have the variable
else: # not ordered
vv = set(var.values)
for val in values:
if val not in vv:
var.add_value(val)
break # we have the variable
else:
return None
if base_value != -1 and var.base_value == -1:
var.base_value = var.values.index(base_rep)
return var
@staticmethod
def ordered_values(values):
"""
Return a sorted list of values. If there exists a prescribed order for
such a set of values, it is returned. Otherwise, values are sorted
alphabetically.
"""
for presorted in DiscreteVariable.presorted_values:
if values == set(presorted):
return presorted
try:
return sorted(values, key=float)
except ValueError:
return sorted(values)
def copy(self, compute_value=None):
var = DiscreteVariable(self.name, self.values, self.ordered,
self.base_value, compute_value)
var.attributes = dict(self.attributes)
return var
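# A hedged usage sketch (illustrative only, never called): `make` reuses a
# compatible existing descriptor and appends any missing values to it.
def _discrete_variable_make_example():
    a = DiscreteVariable.make("answer_example", values=["no", "yes"])
    b = DiscreteVariable.make("answer_example", values=["yes", "maybe"])
    assert a is b
    assert a.values == ["no", "yes", "maybe"]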
class StringVariable(Variable):
"""
Descriptor for string variables. String variables can only appear as
meta attributes.
"""
Unknown = ""
TYPE_HEADERS = ('string', 's', 'text')
def to_val(self, s):
"""
Return the value as a string. If it is already a string, the same
object is returned.
"""
if s is None:
return ""
if isinstance(s, str):
return s
return str(s)
val_from_str_add = to_val
@staticmethod
def str_val(val):
"""Return a string representation of the value."""
if val is "":
return "?"
if isinstance(val, Value):
if val.value is "":
return "?"
val = val.value
return str(val)
def repr_val(self, val):
"""Return a string representation of the value."""
return '"{}"'.format(self.str_val(val))
class TimeVariable(ContinuousVariable):
"""
TimeVariable is a continuous variable with Unix epoch
(1970-01-01 00:00:00+0000) as the origin (0.0). Later dates are positive
real numbers (equivalent to Unix timestamp, with microseconds in the
fraction part), and the dates before it map to the negative real numbers.
Unfortunately, due to a limitation of Python's datetime, only dates
with year >= 1 (A.D.) are supported.
If time is specified without a date, Unix epoch is assumed.
If time is specified without a UTC offset, localtime is assumed.
"""
TYPE_HEADERS = ('time', 't')
UNIX_EPOCH = datetime(1970, 1, 1)
_ISO_FORMATS = [
# have_date, have_time, format_str
# in order of decreased probability
(1, 1, '%Y-%m-%d %H:%M:%S%z'),
(1, 1, '%Y-%m-%d %H:%M:%S'),
(1, 1, '%Y-%m-%d %H:%M'),
(1, 1, '%Y-%m-%dT%H:%M:%S%z'),
(1, 1, '%Y-%m-%dT%H:%M:%S'),
(1, 0, '%Y-%m-%d'),
(1, 1, '%Y-%m-%d %H:%M:%S.%f'),
(1, 1, '%Y-%m-%dT%H:%M:%S.%f'),
(1, 1, '%Y-%m-%d %H:%M:%S.%f%z'),
(1, 1, '%Y-%m-%dT%H:%M:%S.%f%z'),
(1, 1, '%Y%m%dT%H%M%S%z'),
(1, 1, '%Y%m%d%H%M%S%z'),
(0, 1, '%H:%M:%S.%f'),
(0, 1, '%H:%M:%S'),
(0, 1, '%H:%M'),
# These parse as continuous features (plain numbers)
(1, 1, '%Y%m%dT%H%M%S'),
(1, 1, '%Y%m%d%H%M%S'),
(1, 0, '%Y%m%d'),
(1, 0, '%Y%j'),
(1, 0, '%Y'),
(0, 1, '%H%M%S.%f'),
# BUG: In Python as in C, %j doesn't necessitate 0-padding,
# so these two lines must be in this order
(1, 0, '%Y-%m'),
(1, 0, '%Y-%j'),
]
# The regex that matches all above formats
REGEX = (r'^('
r'\d{1,4}-\d{2}-\d{2}([ T]\d{2}:\d{2}(:\d{2}(\.\d+)?([+-]\d{4})?)?)?|'
r'\d{1,4}\d{2}\d{2}(T?\d{2}\d{2}\d{2}([+-]\d{4})?)?|'
r'\d{2}:\d{2}(:\d{2}(\.\d+)?)?|'
r'\d{2}\d{2}\d{2}\.\d+|'
r'\d{1,4}(-?\d{2,3})?'
r')$')
_matches_iso_format = re.compile(REGEX).match
# UTC offset and associated timezone. If parsed datetime values provide an
# offset, it is used for display. If not all values have the same offset,
# +0000 (=UTC) timezone is used and utc_offset is set to False.
utc_offset = None
timezone = timezone.utc
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.have_date = 0
self.have_time = 0
def copy(self, compute_value=None):
copy = super().copy(compute_value=compute_value)
copy.have_date = self.have_date
copy.have_time = self.have_time
return copy
@staticmethod
def _tzre_sub(s, _subtz=re.compile(r'([+-])(\d\d):(\d\d)$').sub):
# Replace +ZZ:ZZ with ISO-compatible +ZZZZ, or strip +0000
return s[:-6] if s.endswith(('+00:00', '-00:00')) else _subtz(r'\1\2\3', s)
def repr_val(self, val):
if isnan(val):
return '?'
if not self.have_date and not self.have_time:
# The time is relative, unitless. The value is absolute.
return str(val.value) if isinstance(val, Value) else str(val)
# If you know how to simplify this, be my guest
seconds = int(val)
microseconds = int(round((val - seconds) * 1e6))
if val < 0:
if microseconds:
seconds, microseconds = seconds - 1, int(1e6) + microseconds
date = datetime.fromtimestamp(0, tz=self.timezone) + timedelta(seconds=seconds)
else:
date = datetime.fromtimestamp(seconds, tz=self.timezone)
date = str(date.replace(microsecond=microseconds))
if self.have_date and not self.have_time:
date = date.split()[0]
elif not self.have_date and self.have_time:
date = date.split()[1]
date = self._tzre_sub(date)
return date
str_val = repr_val
def parse(self, datestr):
"""
Return `datestr`, a datetime provided in one of ISO 8601 formats,
parsed as a real number. Value 0 marks the Unix epoch, positive values
are the dates after it, negative before.
If date is unspecified, epoch date is assumed.
If time is unspecified, 00:00:00.0 is assumed.
If timezone is unspecified, local time is assumed.
"""
if datestr in MISSING_VALUES:
return Unknown
datestr = datestr.strip().rstrip('Z')
ERROR = ValueError("Invalid datetime format '{}'. "
"Only ISO 8601 supported.".format(datestr))
if not self._matches_iso_format(datestr):
try:
# If it is a number, assume it is a unix timestamp
value = float(datestr)
self.have_date = self.have_time = 1
return value
except ValueError:
raise ERROR
for i, (have_date, have_time, fmt) in enumerate(self._ISO_FORMATS):
try:
dt = datetime.strptime(datestr, fmt)
except ValueError:
continue
else:
# Pop this most-recently-used format to front
if 0 < i < len(self._ISO_FORMATS) - 2:
self._ISO_FORMATS[i], self._ISO_FORMATS[0] = \
self._ISO_FORMATS[0], self._ISO_FORMATS[i]
self.have_date |= have_date
self.have_time |= have_time
if not have_date:
dt = dt.replace(self.UNIX_EPOCH.year,
self.UNIX_EPOCH.month,
self.UNIX_EPOCH.day)
break
else:
raise ERROR
# Remember UTC offset. If not all parsed values share the same offset,
# remember none of it.
offset = dt.utcoffset()
if self.utc_offset is not False:
if offset and self.utc_offset is None:
self.utc_offset = offset
self.timezone = timezone(offset)
elif self.utc_offset != offset:
self.utc_offset = False
self.timezone = timezone.utc
# Convert time to UTC timezone. In dates without timezone,
# localtime is assumed. See also:
# https://docs.python.org/3.4/library/datetime.html#datetime.datetime.timestamp
if dt.tzinfo:
dt -= dt.utcoffset()
dt = dt.replace(tzinfo=timezone.utc)
# Unix epoch is the origin, older dates are negative
try:
return dt.timestamp()
except OverflowError:
return -(self.UNIX_EPOCH - dt).total_seconds()
def to_val(self, s):
"""
Convert a value, given as an instance of an arbitrary type, to a float.
"""
if isinstance(s, str):
return self.parse(s)
else:
return super().to_val(s)
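# A hedged usage sketch (illustrative only, never called): ISO 8601 strings
# with an explicit UTC offset parse to Unix timestamps, and repr_val renders
# a timestamp back as an ISO string.
def _time_variable_example():
    t = TimeVariable("timestamp_example")
    assert t.parse("1970-01-01 00:00:01+0000") == 1.0
    assert t.parse("1970-01-02 00:00:00+0000") == 86400.0
    assert t.repr_val(0) == "1970-01-01 00:00:00"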
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
import distutils as _distutils
import inspect as _inspect
import os as _os
import site as _site
import sys as _sys
import typing as _typing
# pylint: disable=g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python.tools import module_util as _module_util
from tensorflow.python.platform import tf_logging as _logging
from tensorflow.python.util.lazy_loader import LazyLoader as _LazyLoader
# API IMPORTS PLACEHOLDER
# WRAPPER_PLACEHOLDER
if "dev" in __version__: # pylint: disable=undefined-variable
_logging.warning("""
TensorFlow's `tf-nightly` package will soon be updated to TensorFlow 2.0.
Please upgrade your code to TensorFlow 2.0:
* https://www.tensorflow.org/guide/migrate
Or install the latest stable TensorFlow 1.X release:
* `pip install -U "tensorflow==1.*"`
Otherwise your code may be broken by the change.
""")
# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We're using bitwise, but there's nothing special about that.
_API_MODULE = _sys.modules[__name__].bitwise # pylint: disable=undefined-variable
_current_module = _sys.modules[__name__]
_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
if not hasattr(_current_module, '__path__'):
__path__ = [_tf_api_dir]
elif _tf_api_dir not in __path__:
__path__.append(_tf_api_dir)
# Hook external TensorFlow modules.
# Import compat before trying to import summary from tensorboard, so that
# reexport_tf_summary can get compat from sys.modules. Only needed if using
# lazy loading.
_current_module.compat.v2 # pylint: disable=pointless-statement
# Load tensorflow-io-gcs-filesystem if enabled
# pylint: disable=g-import-not-at-top
if (_os.getenv('TF_USE_MODULAR_FILESYSTEM', '0') == 'true' or
_os.getenv('TF_USE_MODULAR_FILESYSTEM', '0') == '1'):
import tensorflow_io_gcs_filesystem as _tensorflow_io_gcs_filesystem
# pylint: enable=g-import-not-at-top
# Lazy-load estimator.
_estimator_module = "tensorflow_estimator.python.estimator.api._v1.estimator"
estimator = _LazyLoader("estimator", globals(), _estimator_module)
_module_dir = _module_util.get_parent_dir_for_name(_estimator_module)
if _module_dir:
_current_module.__path__ = [_module_dir] + _current_module.__path__
setattr(_current_module, "estimator", estimator)
_keras_module = "keras.api._v1.keras"
keras = _LazyLoader("keras", globals(), _keras_module)
_module_dir = _module_util.get_parent_dir_for_name(_keras_module)
if _module_dir:
_current_module.__path__ = [_module_dir] + _current_module.__path__
setattr(_current_module, "keras", keras)
# Explicitly import lazy-loaded modules to support autocompletion.
# pylint: disable=g-import-not-at-top
if _typing.TYPE_CHECKING:
from tensorflow_estimator.python.estimator.api._v1 import estimator
# pylint: enable=g-import-not-at-top
from tensorflow.python.util.lazy_loader import LazyLoader # pylint: disable=g-import-not-at-top
_CONTRIB_WARNING = """
The TensorFlow contrib module will not be included in TensorFlow 2.0.
For more information, please see:
* https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md
* https://github.com/tensorflow/addons
* https://github.com/tensorflow/io (for I/O related ops)
If you depend on functionality not listed there, please file an issue.
"""
contrib = LazyLoader('contrib', globals(), 'tensorflow.contrib',
_CONTRIB_WARNING)
del LazyLoader
# The templated code that replaces the placeholder above sometimes
# sets the __all__ variable. If it does, we have to be sure to add
# "contrib".
if '__all__' in vars():
vars()['__all__'].append('contrib')
from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
# The 'app' module will be imported as part of the placeholder section above.
_current_module.app.flags = flags # pylint: disable=undefined-variable
setattr(_current_module, "flags", flags)
_major_api_version = 1
# Add module aliases from Keras to TF.
# Some tf endpoints actually lives under Keras.
if hasattr(_current_module, "keras"):
# It is possible that keras is a lazily loaded module, which might break when
# actually trying to import it. Use a try/except to make sure it doesn't break
# when it is doing some very initial loading, like tf.compat.v2, etc.
try:
_layer_package = "keras.api._v1.keras.__internal__.legacy.layers"
layers = _LazyLoader("layers", globals(), _layer_package)
_module_dir = _module_util.get_parent_dir_for_name(_layer_package)
if _module_dir:
_current_module.__path__ = [_module_dir] + _current_module.__path__
setattr(_current_module, "layers", layers)
_legacy_rnn_package = "keras.api._v1.keras.__internal__.legacy.rnn_cell"
_rnn_cell = _LazyLoader("legacy_rnn", globals(), _legacy_rnn_package)
_module_dir = _module_util.get_parent_dir_for_name(_legacy_rnn_package)
if _module_dir:
_current_module.nn.__path__ = [_module_dir] + _current_module.nn.__path__
_current_module.nn.rnn_cell = _rnn_cell
except ImportError:
pass
# Do an eager load for Keras' code so that any function/method that needs to
# happen at load time will trigger, e.g. registration of optimizers in the
# SavedModel registry.
if hasattr(_current_module, "keras"):
try:
keras._load()
except ImportError:
pass
# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
# TODO(gunan): Enable setting an environment variable to define arbitrary plugin
# directories.
# TODO(gunan): Find a better location for this code snippet.
from tensorflow.python.framework import load_library as _ll
from tensorflow.python.lib.io import file_io as _fi
# Get sitepackages directories for the python installation.
_site_packages_dirs = []
_site_packages_dirs += [] if _site.USER_SITE is None else [_site.USER_SITE]
_site_packages_dirs += [_p for _p in _sys.path if 'site-packages' in _p]
if 'getsitepackages' in dir(_site):
_site_packages_dirs += _site.getsitepackages()
if 'sysconfig' in dir(_distutils):
_site_packages_dirs += [_distutils.sysconfig.get_python_lib()]
_site_packages_dirs = list(set(_site_packages_dirs))
# Find the location of this exact file.
_current_file_location = _inspect.getfile(_inspect.currentframe())
def _running_from_pip_package():
return any(
_current_file_location.startswith(dir_) for dir_ in _site_packages_dirs)
if _running_from_pip_package():
# TODO(gunan): Add sanity checks to loaded modules here.
# Load first party dynamic kernels.
_tf_dir = _os.path.dirname(_current_file_location)
_kernel_dir = _os.path.join(_tf_dir, 'core', 'kernels')
if _os.path.exists(_kernel_dir):
_ll.load_library(_kernel_dir)
# Load third party dynamic kernels.
for _s in _site_packages_dirs:
_plugin_dir = _os.path.join(_s, 'tensorflow-plugins')
if _os.path.exists(_plugin_dir):
_ll.load_library(_plugin_dir)
# Load Pluggable Device Library
_ll.load_pluggable_device_library(_plugin_dir)
# Delete modules that should be hidden from dir().
# Don't fail if these modules are not available.
# For example, this file will originally be placed under tensorflow/_api/v1, which
# does not have the 'python' or 'core' directories. Then, it will be copied
# to tensorflow/ which does have these two directories.
# pylint: disable=undefined-variable
try:
del python
except NameError:
pass
try:
del core
except NameError:
pass
try:
del compiler
except NameError:
pass
# __all__ PLACEHOLDER
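# Hedged illustration (hypothetical helper, not part of the generated API file): the
# lazy-loading pattern used above for `estimator`, `keras`, and `contrib` -- a LazyLoader
# stands in for a module and only performs the real import on first attribute access.
def _example_lazy_load(module_name):
    from tensorflow.python.util.lazy_loader import LazyLoader as _ExampleLazyLoader
    local_name = module_name.split('.')[-1]
    proxy = _ExampleLazyLoader(local_name, globals(), module_name)
    return proxy  # attributes read from `proxy` trigger the actual import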
|
|
"""
********************************************************************************
* Name: gen_commands.py
* Author: Nathan Swain
* Created On: 2015
* Copyright: (c) Brigham Young University 2015
* License: BSD 2-Clause
********************************************************************************
"""
import os
from pathlib import Path
import string
import random
from conda.cli.python_api import run_command, Commands
from yaml import safe_load
from distro import linux_distribution
from jinja2 import Template
from django.conf import settings
from tethys_apps.utilities import get_tethys_home_dir, get_tethys_src_dir
from tethys_cli.cli_colors import write_error, write_info, write_warning
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tethys_portal.settings")
GEN_APACHE_OPTION = 'apache'
GEN_ASGI_SERVICE_OPTION = 'asgi_service'
GEN_NGINX_OPTION = 'nginx'
GEN_NGINX_SERVICE_OPTION = 'nginx_service'
GEN_PORTAL_OPTION = 'portal_config'
GEN_SERVICES_OPTION = 'services'
GEN_INSTALL_OPTION = 'install'
GEN_META_YAML_OPTION = 'metayaml'
FILE_NAMES = {
GEN_APACHE_OPTION: 'tethys-default.conf',
GEN_ASGI_SERVICE_OPTION: 'asgi_supervisord.conf',
GEN_NGINX_OPTION: 'tethys_nginx.conf',
GEN_NGINX_SERVICE_OPTION: 'nginx_supervisord.conf',
GEN_PORTAL_OPTION: 'portal_config.yml',
GEN_SERVICES_OPTION: 'services.yml',
GEN_INSTALL_OPTION: 'install.yml',
GEN_META_YAML_OPTION: 'meta.yaml'
}
VALID_GEN_OBJECTS = (
# GEN_APACHE_OPTION,
GEN_ASGI_SERVICE_OPTION,
GEN_NGINX_OPTION,
GEN_NGINX_SERVICE_OPTION,
GEN_PORTAL_OPTION,
GEN_SERVICES_OPTION,
GEN_INSTALL_OPTION,
GEN_META_YAML_OPTION
)
TETHYS_SRC = get_tethys_src_dir()
TETHYS_HOME = get_tethys_home_dir()
def add_gen_parser(subparsers):
# Setup generate command
gen_parser = subparsers.add_parser('gen', help='Aids the installation of Tethys by automating the '
'creation of supporting files.')
gen_parser.add_argument('type', help='The type of object to generate.', choices=VALID_GEN_OBJECTS)
gen_parser.add_argument('-d', '--directory', help='Destination directory for the generated object.')
gen_parser.add_argument('-p', '--pin-level', choices=['major', 'minor', 'patch', 'none'],
help='Level to pin dependencies when generating the meta.yaml. One of "major", "minor", '
'"patch", or "none". Defaults to "none".')
gen_parser.add_argument('--client-max-body-size', dest='client_max_body_size',
help='Populate the client_max_body_size parameter for nginx config. Defaults to "75M".')
gen_parser.add_argument('--asgi-processes', dest='asgi_processes',
help='The maximum number of asgi worker processes. Defaults to 1.')
gen_parser.add_argument('--conda-prefix', dest='conda_prefix',
help='The path to the Tethys conda environment. Required if $CONDA_PREFIX is not defined.')
gen_parser.add_argument('--tethys-port', dest='tethys_port',
help='Port for the Tethys Server to run on in production. This is used when generating the '
'Daphne and nginx configuration files. Defaults to 8000.')
gen_parser.add_argument('--overwrite', dest='overwrite', action='store_true',
help='Overwrite existing file without prompting.')
gen_parser.set_defaults(func=generate_command, client_max_body_size='75M', asgi_processes=1, conda_prefix=False,
tethys_port=8000, overwrite=False, pin_level='none')
def get_environment_value(value_name):
value = os.environ.get(value_name)
if value is not None:
return value
else:
raise EnvironmentError(f'Environment value "{value_name}" must be set before generating this file.')
def get_settings_value(value_name):
value = getattr(settings, value_name, None)
if value is not None:
return value
else:
raise ValueError(f'Settings value "{value_name}" must be set before generating this file.')
def generate_secret_key():
return ''.join([random.choice(string.ascii_letters + string.digits) for _ in range(50)])
def gen_nginx(args):
hostname = str(settings.ALLOWED_HOSTS[0]) if len(settings.ALLOWED_HOSTS) > 0 else '127.0.0.1'
workspaces_root = get_settings_value('TETHYS_WORKSPACES_ROOT')
static_root = get_settings_value('STATIC_ROOT')
context = {
'hostname': hostname,
'workspaces_root': workspaces_root,
'static_root': static_root,
'client_max_body_size': args.client_max_body_size,
'port': args.tethys_port
}
return context
def gen_asgi_service(args):
nginx_user = ''
nginx_conf_path = '/etc/nginx/nginx.conf'
if os.path.exists(nginx_conf_path):
with open(nginx_conf_path, 'r') as nginx_conf:
for line in nginx_conf.readlines():
tokens = line.split()
if len(tokens) > 0 and tokens[0] == 'user':
nginx_user = tokens[1].strip(';')
break
conda_prefix = args.conda_prefix if args.conda_prefix else get_environment_value('CONDA_PREFIX')
conda_home = Path(conda_prefix).parents[1]
user_option_prefix = ''
try:
linux_distro = linux_distribution(full_distribution_name=0)[0]
if linux_distro in ['redhat', 'centos']:
user_option_prefix = 'http-'
except Exception:
pass
context = {
'nginx_user': nginx_user,
'port': args.tethys_port,
'asgi_processes': args.asgi_processes,
'conda_prefix': conda_prefix,
'conda_home': conda_home,
'tethys_src': TETHYS_SRC,
'tethys_home': TETHYS_HOME,
'user_option_prefix': user_option_prefix
}
return context
def gen_nginx_service(args):
context = {}
return context
def gen_portal_yaml(args):
write_info(f'A Tethys Portal configuration file is being generated at '
f'{get_tethys_home_dir() + "/" + FILE_NAMES[GEN_PORTAL_OPTION]}. '
f'Please review the file and fill in the appropriate settings.')
context = {'SECRET_KEY': generate_secret_key()}
return context
def gen_services_yaml(args):
context = {}
return context
def derive_version_from_conda_environment(dep_str, level='none'):
"""
Determine dependency string based on the current tethys environment.
Args:
dep_str(str): The dep string from the environment.yml (e.g. 'python>=3.6').
level(str): Level to pin dependencies to. One of 'major', 'minor', 'patch', or 'none'. Defaults to 'none'.
Returns:
str: the dependency string.
"""
stdout, stderr, ret = run_command(Commands.LIST, dep_str)
if ret != 0:
print(f'ERROR: Something went wrong looking up dependency "{dep_str}" in environment')
print(stderr)
return dep_str
lines = stdout.split('\n')
for line in lines:
if line.startswith('#'):
continue
try:
package, version, build, channel = line.split()
except ValueError:
continue
if package != dep_str:
continue
version_numbers = version.split('.')
if level == 'major':
if len(version_numbers) >= 2:
dep_str = f'{package}={version_numbers[0]}.*'
if len(version_numbers) == 1:
dep_str = f'{package}={version_numbers[0]}'
elif level == 'minor':
if len(version_numbers) >= 3:
dep_str = f'{package}={version_numbers[0]}.{version_numbers[1]}.*'
elif len(version_numbers) == 2:
dep_str = f'{package}={version_numbers[0]}.{version_numbers[1]}'
elif level == 'patch':
if len(version_numbers) > 3:
dep_str = f'{package}={version_numbers[0]}.{version_numbers[1]}.{version_numbers[2]}.*'
elif len(version_numbers) >= 1:
dep_str = f'{package}={".".join(version_numbers)}'
return dep_str
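# Hedged illustration (hypothetical helper, for documentation only): the pin-level rules
# applied by derive_version_from_conda_environment, shown on a literal version string
# instead of a `conda list` lookup.
def _example_pin(package, version, level):
    parts = version.split('.')
    if level == 'major' and len(parts) >= 2:
        return f'{package}={parts[0]}.*'
    if level == 'minor' and len(parts) >= 3:
        return f'{package}={parts[0]}.{parts[1]}.*'
    if level == 'patch' and len(parts) > 3:
        return f'{package}={parts[0]}.{parts[1]}.{parts[2]}.*'
    if level in ('major', 'minor', 'patch'):
        return f'{package}={version}'
    return package  # level 'none' leaves the dependency unpinned

# _example_pin('django', '3.2.15', 'major') -> 'django=3.*'
# _example_pin('django', '3.2.15', 'minor') -> 'django=3.2.*'
# _example_pin('django', '3.2.15', 'patch') -> 'django=3.2.15'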
def gen_meta_yaml(args):
environment_file_path = os.path.join(TETHYS_SRC, 'environment.yml')
with open(environment_file_path, 'r') as env_file:
environment = safe_load(env_file)
dependencies = environment.get('dependencies', [])
run_requirements = []
for dependency in dependencies:
if not any([operator in dependency for operator in ['=', '<', '>']]):
conda_env_version = derive_version_from_conda_environment(dependency, level=args.pin_level)
run_requirements.append(conda_env_version)
else:
run_requirements.append(dependency)
context = dict(run_requirements=run_requirements)
return context
def gen_install(args):
write_info('Please review the generated install.yml file and fill in the appropriate information '
'(app name is required).')
context = {}
return context
def get_destination_path(args):
# Determine destination file name (defaults to type)
destination_file = FILE_NAMES[args.type]
# Default destination directory is the Tethys home directory
destination_dir = TETHYS_HOME
# Make the Tethys Home directory if it doesn't exist yet.
if not os.path.isdir(destination_dir):
os.makedirs(destination_dir, exist_ok=True)
if args.type in [GEN_SERVICES_OPTION, GEN_INSTALL_OPTION]:
destination_dir = os.getcwd()
elif args.type == GEN_META_YAML_OPTION:
destination_dir = os.path.join(TETHYS_SRC, 'conda.recipe')
if args.directory:
destination_dir = os.path.abspath(args.directory)
if not os.path.isdir(destination_dir):
write_error('ERROR: "{0}" is not a valid directory.'.format(destination_dir))
exit(1)
destination_path = os.path.join(destination_dir, destination_file)
check_for_existing_file(destination_path, destination_file, args.overwrite)
return destination_path
def check_for_existing_file(destination_path, destination_file, overwrite):
# Check for pre-existing file
if os.path.isfile(destination_path):
valid_inputs = ('y', 'n', 'yes', 'no')
no_inputs = ('n', 'no')
if overwrite:
overwrite_input = 'yes'
else:
overwrite_input = input('WARNING: "{0}" already exists. '
'Overwrite? (y/n): '.format(destination_file)).lower()
while overwrite_input not in valid_inputs:
overwrite_input = input('Invalid option. Overwrite? (y/n): ').lower()
if overwrite_input in no_inputs:
write_warning('Generation of "{0}" cancelled.'.format(destination_file))
exit(0)
def render_template(file_type, context, destination_path):
# Determine template path
gen_templates_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'gen_templates')
template_path = os.path.join(gen_templates_dir, file_type)
# Parse the template, closing the file handle when done
with open(template_path) as template_file:
template = Template(template_file.read())
# Render template and write to file
with open(destination_path, 'w') as f:
f.write(template.render(context))
def write_path_to_console(file_path):
write_info(f'File generated at "{file_path}".')
GEN_COMMANDS = {
GEN_ASGI_SERVICE_OPTION: gen_asgi_service,
GEN_NGINX_OPTION: gen_nginx,
GEN_NGINX_SERVICE_OPTION: gen_nginx_service,
GEN_PORTAL_OPTION: gen_portal_yaml,
GEN_SERVICES_OPTION: gen_services_yaml,
GEN_INSTALL_OPTION: gen_install,
GEN_META_YAML_OPTION: gen_meta_yaml
}
def generate_command(args):
"""
Generate a settings file for a new installation.
"""
# Setup variables
context = GEN_COMMANDS[args.type](args)
destination_path = get_destination_path(args)
render_template(args.type, context, destination_path)
write_path_to_console(destination_path)
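# Hedged usage sketch (hypothetical, for illustration only): wiring the `gen` subcommand
# into an argparse parser and dispatching to generate_command, roughly as the Tethys CLI does.
def _example_run_gen_services():
    import argparse
    parser = argparse.ArgumentParser(prog='tethys')
    subparsers = parser.add_subparsers()
    add_gen_parser(subparsers)
    args = parser.parse_args(['gen', 'services', '--overwrite'])
    args.func(args)  # calls generate_command, which renders services.yml into the current directory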
|
|
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from moto.ec2.utils import filters_from_querystring
class VPCs(BaseResponse):
def create_vpc(self):
cidr_block = self._get_param('CidrBlock')
instance_tenancy = self._get_param('InstanceTenancy', if_none='default')
amazon_provided_ipv6_cidr_blocks = self._get_param('AmazonProvidedIpv6CidrBlock')
vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy,
amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_blocks)
doc_date = '2013-10-15' if 'Boto/' in self.headers.get('user-agent', '') else '2016-11-15'
template = self.response_template(CREATE_VPC_RESPONSE)
return template.render(vpc=vpc, doc_date=doc_date)
def delete_vpc(self):
vpc_id = self._get_param('VpcId')
vpc = self.ec2_backend.delete_vpc(vpc_id)
template = self.response_template(DELETE_VPC_RESPONSE)
return template.render(vpc=vpc)
def describe_vpcs(self):
vpc_ids = self._get_multi_param('VpcId')
filters = filters_from_querystring(self.querystring)
vpcs = self.ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters)
doc_date = '2013-10-15' if 'Boto/' in self.headers.get('user-agent', '') else '2016-11-15'
template = self.response_template(DESCRIBE_VPCS_RESPONSE)
return template.render(vpcs=vpcs, doc_date=doc_date)
def describe_vpc_attribute(self):
vpc_id = self._get_param('VpcId')
attribute = self._get_param('Attribute')
attr_name = camelcase_to_underscores(attribute)
value = self.ec2_backend.describe_vpc_attribute(vpc_id, attr_name)
template = self.response_template(DESCRIBE_VPC_ATTRIBUTE_RESPONSE)
return template.render(vpc_id=vpc_id, attribute=attribute, value=value)
def modify_vpc_attribute(self):
vpc_id = self._get_param('VpcId')
for attribute in ('EnableDnsSupport', 'EnableDnsHostnames'):
if self.querystring.get('%s.Value' % attribute):
attr_name = camelcase_to_underscores(attribute)
attr_value = self.querystring.get('%s.Value' % attribute)[0]
self.ec2_backend.modify_vpc_attribute(
vpc_id, attr_name, attr_value)
return MODIFY_VPC_ATTRIBUTE_RESPONSE
def associate_vpc_cidr_block(self):
vpc_id = self._get_param('VpcId')
amazon_provided_ipv6_cidr_blocks = self._get_param('AmazonProvidedIpv6CidrBlock')
# TODO: test on AWS whether an IPv4 and an IPv6 association can be created in the same call
cidr_block = self._get_param('CidrBlock') if not amazon_provided_ipv6_cidr_blocks else None
value = self.ec2_backend.associate_vpc_cidr_block(vpc_id, cidr_block, amazon_provided_ipv6_cidr_blocks)
if not amazon_provided_ipv6_cidr_blocks:
render_template = ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE
else:
render_template = IPV6_ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE
template = self.response_template(render_template)
return template.render(vpc_id=vpc_id, value=value, cidr_block=value['cidr_block'],
association_id=value['association_id'], cidr_block_state='associating')
def disassociate_vpc_cidr_block(self):
association_id = self._get_param('AssociationId')
value = self.ec2_backend.disassociate_vpc_cidr_block(association_id)
if "::" in value.get('cidr_block', ''):
render_template = IPV6_DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE
else:
render_template = DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE
template = self.response_template(render_template)
return template.render(vpc_id=value['vpc_id'], cidr_block=value['cidr_block'],
association_id=value['association_id'], cidr_block_state='disassociating')
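# Hedged usage sketch (hypothetical, not part of moto itself): exercising these VPC responses
# through boto3 under the mock_ec2 decorator, assuming boto3 and moto are installed.
def _example_create_and_describe_vpc():
    import boto3
    from moto import mock_ec2

    @mock_ec2
    def _run():
        client = boto3.client('ec2', region_name='us-east-1')
        vpc = client.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
        described = client.describe_vpcs(VpcIds=[vpc['VpcId']])['Vpcs'][0]
        return described['CidrBlock']  # '10.0.0.0/16'

    return _run()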
CREATE_VPC_RESPONSE = """
<CreateVpcResponse xmlns="http://ec2.amazonaws.com/doc/{{doc_date}}/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpc>
<vpcId>{{ vpc.id }}</vpcId>
<state>pending</state>
<cidrBlock>{{ vpc.cidr_block }}</cidrBlock>
{% if doc_date == "2016-11-15" %}
<cidrBlockAssociationSet>
{% for assoc in vpc.get_cidr_block_association_set() %}
<item>
<cidrBlock>{{assoc.cidr_block}}</cidrBlock>
<associationId>{{ assoc.association_id }}</associationId>
<cidrBlockState>
<state>{{assoc.cidr_block_state.state}}</state>
</cidrBlockState>
</item>
{% endfor %}
</cidrBlockAssociationSet>
<ipv6CidrBlockAssociationSet>
{% for assoc in vpc.get_cidr_block_association_set(ipv6=True) %}
<item>
<ipv6CidrBlock>{{assoc.cidr_block}}</ipv6CidrBlock>
<associationId>{{ assoc.association_id }}</associationId>
<ipv6CidrBlockState>
<state>{{assoc.cidr_block_state.state}}</state>
</ipv6CidrBlockState>
</item>
{% endfor %}
</ipv6CidrBlockAssociationSet>
{% endif %}
<dhcpOptionsId>{% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-1a2b3c4d2{% endif %}</dhcpOptionsId>
<instanceTenancy>{{ vpc.instance_tenancy }}</instanceTenancy>
<tagSet>
{% for tag in vpc.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</vpc>
</CreateVpcResponse>"""
DESCRIBE_VPCS_RESPONSE = """
<DescribeVpcsResponse xmlns="http://ec2.amazonaws.com/doc/{{doc_date}}/">
<requestId>7a62c442-3484-4f42-9342-6942EXAMPLE</requestId>
<vpcSet>
{% for vpc in vpcs %}
<item>
<vpcId>{{ vpc.id }}</vpcId>
<state>{{ vpc.state }}</state>
<cidrBlock>{{ vpc.cidr_block }}</cidrBlock>
{% if doc_date == "2016-11-15" %}
<cidrBlockAssociationSet>
{% for assoc in vpc.get_cidr_block_association_set() %}
<item>
<cidrBlock>{{assoc.cidr_block}}</cidrBlock>
<associationId>{{ assoc.association_id }}</associationId>
<cidrBlockState>
<state>{{assoc.cidr_block_state.state}}</state>
</cidrBlockState>
</item>
{% endfor %}
</cidrBlockAssociationSet>
<ipv6CidrBlockAssociationSet>
{% for assoc in vpc.get_cidr_block_association_set(ipv6=True) %}
<item>
<ipv6CidrBlock>{{assoc.cidr_block}}</ipv6CidrBlock>
<associationId>{{ assoc.association_id }}</associationId>
<ipv6CidrBlockState>
<state>{{assoc.cidr_block_state.state}}</state>
</ipv6CidrBlockState>
</item>
{% endfor %}
</ipv6CidrBlockAssociationSet>
{% endif %}
<dhcpOptionsId>{% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-7a8b9c2d{% endif %}</dhcpOptionsId>
<instanceTenancy>{{ vpc.instance_tenancy }}</instanceTenancy>
<isDefault>{{ vpc.is_default }}</isDefault>
<tagSet>
{% for tag in vpc.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</vpcSet>
</DescribeVpcsResponse>"""
DELETE_VPC_RESPONSE = """
<DeleteVpcResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteVpcResponse>
"""
DESCRIBE_VPC_ATTRIBUTE_RESPONSE = """
<DescribeVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcId>{{ vpc_id }}</vpcId>
<{{ attribute }}>
<value>{{ value }}</value>
</{{ attribute }}>
</DescribeVpcAttributeResponse>"""
MODIFY_VPC_ATTRIBUTE_RESPONSE = """
<ModifyVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</ModifyVpcAttributeResponse>"""
ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """
<AssociateVpcCidrBlockResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcId>{{vpc_id}}</vpcId>
<cidrBlockAssociation>
<associationId>{{association_id}}</associationId>
<cidrBlock>{{cidr_block}}</cidrBlock>
<cidrBlockState>
<state>{{cidr_block_state}}</state>
</cidrBlockState>
</cidrBlockAssociation>
</AssociateVpcCidrBlockResponse>"""
DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """
<DisassociateVpcCidrBlockResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcId>{{vpc_id}}</vpcId>
<cidrBlockAssociation>
<associationId>{{association_id}}</associationId>
<cidrBlock>{{cidr_block}}</cidrBlock>
<cidrBlockState>
<state>{{cidr_block_state}}</state>
</cidrBlockState>
</cidrBlockAssociation>
</DisassociateVpcCidrBlockResponse>"""
IPV6_ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """
<AssociateVpcCidrBlockResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>33af6c54-1139-4d50-b4f7-15a8example</requestId>
<vpcId>{{vpc_id}}</vpcId>
<ipv6CidrBlockAssociation>
<associationId>{{association_id}}</associationId>
<ipv6CidrBlock>{{cidr_block}}</ipv6CidrBlock>
<ipv6CidrBlockState>
<state>{{cidr_block_state}}</state>
</ipv6CidrBlockState>
</ipv6CidrBlockAssociation>
</AssociateVpcCidrBlockResponse>"""
IPV6_DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """
<DisassociateVpcCidrBlockResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>33af6c54-1139-4d50-b4f7-15a8example</requestId>
<vpcId>{{vpc_id}}</vpcId>
<ipv6CidrBlockAssociation>
<associationId>{{association_id}}</associationId>
<ipv6CidrBlock>{{cidr_block}}</ipv6CidrBlock>
<ipv6CidrBlockState>
<state>{{cidr_block_state}}</state>
</ipv6CidrBlockState>
</ipv6CidrBlockAssociation>
</DisassociateVpcCidrBlockResponse>"""
|
|
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the network RPC API.
"""
from oslo.config import cfg
from oslo import messaging
from nova.objects import base as objects_base
from nova.openstack.common import jsonutils
from nova import rpc
rpcapi_opts = [
cfg.StrOpt('network_topic',
default='network',
help='The topic network nodes listen on'),
cfg.BoolOpt('multi_host',
default=False,
help='Default value for multi_host in networks. Also, if set, '
'some rpc network calls will be sent directly to host.'),
]
CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
rpcapi_cap_opt = cfg.StrOpt('network',
help='Set a version cap for messages sent to network services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class NetworkAPI(object):
'''Client side of the network rpc API.
API version history:
1.0 - Initial version.
1.1 - Adds migrate_instance_[start|finish]
1.2 - Make migrate_instance_[start|finish] a little more flexible
1.3 - Adds fanout cast update_dns for multi_host networks
1.4 - Add get_backdoor_port()
1.5 - Adds associate
1.6 - Adds instance_uuid to _{dis,}associate_floating_ip
1.7 - Adds method get_floating_ip_pools to replace get_floating_pools
1.8 - Adds macs to allocate_for_instance
1.9 - Adds rxtx_factor to [add|remove]_fixed_ip, removes instance_uuid
from allocate_for_instance and instance_get_nw_info
... Grizzly supports message version 1.9. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.9.
1.10- Adds (optional) requested_networks to deallocate_for_instance
... Havana supports message version 1.10. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.10.
NOTE: remove unused method get_vifs_by_instance()
NOTE: remove unused method get_vif_by_mac_address()
NOTE: remove unused method get_network()
NOTE: remove unused method get_all_networks()
1.11 - Add instance to deallocate_for_instance(). Remove instance_id,
project_id, and host.
1.12 - Add instance to deallocate_fixed_ip()
... Icehouse supports message version 1.12. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.12.
'''
VERSION_ALIASES = {
'grizzly': '1.9',
'havana': '1.10',
'icehouse': '1.12',
}
def __init__(self, topic=None):
super(NetworkAPI, self).__init__()
topic = topic or CONF.network_topic
target = messaging.Target(topic=topic, version='1.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.network,
CONF.upgrade_levels.network)
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target, version_cap, serializer)
# TODO(russellb): Convert this to named arguments. It's a pretty large
# list, so unwinding it all is probably best done in its own patch so it's
# easier to review.
def create_networks(self, ctxt, **kwargs):
return self.client.call(ctxt, 'create_networks', **kwargs)
def delete_network(self, ctxt, uuid, fixed_range):
return self.client.call(ctxt, 'delete_network',
uuid=uuid, fixed_range=fixed_range)
def disassociate_network(self, ctxt, network_uuid):
return self.client.call(ctxt, 'disassociate_network',
network_uuid=network_uuid)
def get_fixed_ip(self, ctxt, id):
return self.client.call(ctxt, 'get_fixed_ip', id=id)
def get_fixed_ip_by_address(self, ctxt, address):
return self.client.call(ctxt, 'get_fixed_ip_by_address',
address=address)
def get_floating_ip(self, ctxt, id):
return self.client.call(ctxt, 'get_floating_ip', id=id)
def get_floating_ip_pools(self, ctxt):
cctxt = self.client.prepare(version="1.7")
return cctxt.call(ctxt, 'get_floating_ip_pools')
def get_floating_ip_by_address(self, ctxt, address):
return self.client.call(ctxt, 'get_floating_ip_by_address',
address=address)
def get_floating_ips_by_project(self, ctxt):
return self.client.call(ctxt, 'get_floating_ips_by_project')
def get_floating_ips_by_fixed_address(self, ctxt, fixed_address):
return self.client.call(ctxt, 'get_floating_ips_by_fixed_address',
fixed_address=fixed_address)
def get_instance_id_by_floating_address(self, ctxt, address):
return self.client.call(ctxt, 'get_instance_id_by_floating_address',
address=address)
def allocate_floating_ip(self, ctxt, project_id, pool, auto_assigned):
return self.client.call(ctxt, 'allocate_floating_ip',
project_id=project_id, pool=pool,
auto_assigned=auto_assigned)
def deallocate_floating_ip(self, ctxt, address, affect_auto_assigned):
return self.client.call(ctxt, 'deallocate_floating_ip',
address=address,
affect_auto_assigned=affect_auto_assigned)
def associate_floating_ip(self, ctxt, floating_address, fixed_address,
affect_auto_assigned):
return self.client.call(ctxt, 'associate_floating_ip',
floating_address=floating_address,
fixed_address=fixed_address,
affect_auto_assigned=affect_auto_assigned)
def disassociate_floating_ip(self, ctxt, address, affect_auto_assigned):
return self.client.call(ctxt, 'disassociate_floating_ip',
address=address,
affect_auto_assigned=affect_auto_assigned)
def allocate_for_instance(self, ctxt, instance_id, project_id, host,
rxtx_factor, vpn, requested_networks, macs=None,
dhcp_options=None):
if CONF.multi_host:
cctxt = self.client.prepare(version='1.9', server=host)
else:
cctxt = self.client.prepare(version='1.9')
return cctxt.call(ctxt, 'allocate_for_instance',
instance_id=instance_id, project_id=project_id,
host=host, rxtx_factor=rxtx_factor, vpn=vpn,
requested_networks=requested_networks,
macs=jsonutils.to_primitive(macs))
def deallocate_for_instance(self, ctxt, instance, requested_networks=None):
cctxt = self.client
kwargs = {}
if self.client.can_send_version('1.11'):
version = '1.11'
kwargs['instance'] = instance
kwargs['requested_networks'] = requested_networks
else:
if self.client.can_send_version('1.10'):
version = '1.10'
kwargs['requested_networks'] = requested_networks
else:
version = '1.0'
kwargs['host'] = instance['host']
kwargs['instance_id'] = instance.uuid
kwargs['project_id'] = instance.project_id
if CONF.multi_host:
cctxt = cctxt.prepare(server=instance['host'], version=version)
return cctxt.call(ctxt, 'deallocate_for_instance', **kwargs)
def add_fixed_ip_to_instance(self, ctxt, instance_id, rxtx_factor,
host, network_id):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(ctxt, 'add_fixed_ip_to_instance',
instance_id=instance_id, rxtx_factor=rxtx_factor,
host=host, network_id=network_id)
def remove_fixed_ip_from_instance(self, ctxt, instance_id, rxtx_factor,
host, address):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(ctxt, 'remove_fixed_ip_from_instance',
instance_id=instance_id, rxtx_factor=rxtx_factor,
host=host, address=address)
def add_network_to_project(self, ctxt, project_id, network_uuid):
return self.client.call(ctxt, 'add_network_to_project',
project_id=project_id,
network_uuid=network_uuid)
def associate(self, ctxt, network_uuid, associations):
cctxt = self.client.prepare(version='1.5')
return cctxt.call(ctxt, 'associate',
network_uuid=network_uuid,
associations=associations)
def get_instance_nw_info(self, ctxt, instance_id, rxtx_factor, host,
project_id):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(ctxt, 'get_instance_nw_info',
instance_id=instance_id, rxtx_factor=rxtx_factor,
host=host, project_id=project_id)
def validate_networks(self, ctxt, networks):
return self.client.call(ctxt, 'validate_networks', networks=networks)
def get_instance_uuids_by_ip_filter(self, ctxt, filters):
return self.client.call(ctxt, 'get_instance_uuids_by_ip_filter',
filters=filters)
def get_dns_domains(self, ctxt):
return self.client.call(ctxt, 'get_dns_domains')
def add_dns_entry(self, ctxt, address, name, dns_type, domain):
return self.client.call(ctxt, 'add_dns_entry',
address=address, name=name,
dns_type=dns_type, domain=domain)
def modify_dns_entry(self, ctxt, address, name, domain):
return self.client.call(ctxt, 'modify_dns_entry',
address=address, name=name, domain=domain)
def delete_dns_entry(self, ctxt, name, domain):
return self.client.call(ctxt, 'delete_dns_entry',
name=name, domain=domain)
def delete_dns_domain(self, ctxt, domain):
return self.client.call(ctxt, 'delete_dns_domain', domain=domain)
def get_dns_entries_by_address(self, ctxt, address, domain):
return self.client.call(ctxt, 'get_dns_entries_by_address',
address=address, domain=domain)
def get_dns_entries_by_name(self, ctxt, name, domain):
return self.client.call(ctxt, 'get_dns_entries_by_name',
name=name, domain=domain)
def create_private_dns_domain(self, ctxt, domain, av_zone):
return self.client.call(ctxt, 'create_private_dns_domain',
domain=domain, av_zone=av_zone)
def create_public_dns_domain(self, ctxt, domain, project):
return self.client.call(ctxt, 'create_public_dns_domain',
domain=domain, project=project)
def setup_networks_on_host(self, ctxt, instance_id, host, teardown):
# NOTE(tr3buchet): the call is just to wait for completion
return self.client.call(ctxt, 'setup_networks_on_host',
instance_id=instance_id, host=host,
teardown=teardown)
def set_network_host(self, ctxt, network_ref):
network_ref_p = jsonutils.to_primitive(network_ref)
return self.client.call(ctxt, 'set_network_host',
network_ref=network_ref_p)
def rpc_setup_network_on_host(self, ctxt, network_id, teardown, host):
# NOTE(tr3buchet): the call is just to wait for completion
cctxt = self.client.prepare(server=host)
return cctxt.call(ctxt, 'rpc_setup_network_on_host',
network_id=network_id, teardown=teardown)
# NOTE(russellb): Ideally this would not have a prefix of '_' since it is
# a part of the rpc API. However, this is how it was being called when the
# 1.0 API was being documented using this client proxy class. It should be
# changed if there was ever a 2.0.
def _rpc_allocate_fixed_ip(self, ctxt, instance_id, network_id, address,
vpn, host):
cctxt = self.client.prepare(server=host)
return cctxt.call(ctxt, '_rpc_allocate_fixed_ip',
instance_id=instance_id, network_id=network_id,
address=address, vpn=vpn)
def deallocate_fixed_ip(self, ctxt, address, host, instance):
kwargs = {}
if self.client.can_send_version('1.12'):
version = '1.12'
kwargs['instance'] = instance
else:
version = '1.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'deallocate_fixed_ip',
address=address, host=host, **kwargs)
def update_dns(self, ctxt, network_ids):
cctxt = self.client.prepare(fanout=True, version='1.3')
cctxt.cast(ctxt, 'update_dns', network_ids=network_ids)
# NOTE(russellb): Ideally this would not have a prefix of '_' since it is
# a part of the rpc API. However, this is how it was being called when the
# 1.0 API was being documented using this client proxy class. It should be
# changed if there was ever a 2.0.
def _associate_floating_ip(self, ctxt, floating_address, fixed_address,
interface, host, instance_uuid=None):
cctxt = self.client.prepare(server=host, version='1.6')
return cctxt.call(ctxt, '_associate_floating_ip',
floating_address=floating_address,
fixed_address=fixed_address,
interface=interface, instance_uuid=instance_uuid)
# NOTE(russellb): Ideally this would not have a prefix of '_' since it is
# a part of the rpc API. However, this is how it was being called when the
# 1.0 API was being documented using this client proxy class. It should be
# changed if there was ever a 2.0.
def _disassociate_floating_ip(self, ctxt, address, interface, host,
instance_uuid=None):
cctxt = self.client.prepare(server=host, version='1.6')
return cctxt.call(ctxt, '_disassociate_floating_ip',
address=address, interface=interface,
instance_uuid=instance_uuid)
def lease_fixed_ip(self, ctxt, address, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(ctxt, 'lease_fixed_ip', address=address)
def release_fixed_ip(self, ctxt, address, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(ctxt, 'release_fixed_ip', address=address)
def migrate_instance_start(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
floating_addresses, host=None):
cctxt = self.client.prepare(server=host, version='1.2')
return cctxt.call(ctxt, 'migrate_instance_start',
instance_uuid=instance_uuid,
rxtx_factor=rxtx_factor,
project_id=project_id,
source=source_compute,
dest=dest_compute,
floating_addresses=floating_addresses)
def migrate_instance_finish(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
floating_addresses, host=None):
cctxt = self.client.prepare(server=host, version='1.2')
return cctxt.call(ctxt, 'migrate_instance_finish',
instance_uuid=instance_uuid,
rxtx_factor=rxtx_factor,
project_id=project_id,
source=source_compute,
dest=dest_compute,
floating_addresses=floating_addresses)
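# Hedged illustration (hypothetical helper, for documentation only): how the version cap is
# derived in __init__ from the [upgrade_levels] network option via VERSION_ALIASES -- a release
# alias maps to its documented cap, while an explicit version string passes through unchanged.
def _example_resolve_version_cap(configured_level):
    return NetworkAPI.VERSION_ALIASES.get(configured_level, configured_level)

# _example_resolve_version_cap('icehouse') -> '1.12'
# _example_resolve_version_cap('1.10') -> '1.10'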
|
|
import sys
import math
import copy  # Deep copies are needed because the temp lists sometimes modify their parent lists.
import wx
import numpy as np  # For the argmax() functionality
class LogicCalculator:
"""
This is the main class for the calculator function.
First, it determines what functions to use. It does this by seeing what variables you gave it.
By seeing what variables you gave it, it can determine which equations are available.
New variables can be gained by solving simple equations. This will gain access to new equations to use.
Equations with one unknown are solved using a root finder.
Multiple equations with multiple unknowns are solved using Gauss-Seidel.
Once the equations have been solved, it returns all the information it found. This is displayed on a new screen.
Pseudocode:
- What have I been given? What do I need to find?
- What equations contain what I need to find?
- Which of those equations include the variables I have been given?
- What equations can I use to get the variables that I still need?
Note: Up to this point, no equations have been run, just an analysis of what variables they contain.
- Solve the equations, and return your answer.
"""
def __init__(self,*args,**kwargs):
"""
'subject' is what subject you are solving for. It is a string.
'args' is [subject,goal]. There can be multiple goals, because goals is a list
'kwargs' is a dictionary containing all the variables, known and unknown.
"""
print('Begin Calculation')
self = LogicCalculator  # Note: rebinds self to the class itself, so attributes become class attributes and the later self.method(self, ...) calls work.
#What have I been given?
self.subject = args[0][0]
print('\nsubject: ', self.subject)  # self.medium is only set later (for the thermo subject), so it is printed there.
#What is known? What is not known?
self.unknown,self.known = {},{} #Blank dictionaries
for i in kwargs.items():
if '@' not in i:
if ('unknown' in i) or ('' in i):
if i[0][0] != 'U': self.unknown.update({i[0]:i[1]})
else:
if i[0][0] != 'U': self.known.update({i[0]:i[1]})
#For now, we will not worry about these.
self.unknown.update({'MM':'unknown','R':'unknown','Tcr':'unknown','Pcr':'unknown','Vcr':'unknown'})
print('\nknown: ',self.known,'\nunknown: ',self.unknown)
#What do I need to find?
self.goal = args[1]
print('\ngoal: ',self.goal)
for element in ['known','unknown','goal']: #Set the values in the system
for item in getattr(self,element).items():
setattr(self,item[0],item[1])
#Equations
##Retrieve the correct Equation Database & other things pertaining to the subject.
print('Loading Database')
if self.subject == 'thermo':
from .logicThermoEquations import LogicThermoEquations
from .logicThermoTableLookup import TableUtilities
self.medium = args[2]
constants = LogicThermoEquations.constants()
for item in constants.items():
setattr(self,item[0],item[1]) #Record the constants
self.eqnDatabase = LogicThermoEquations.eqnDatabase(self)
print(' ~ Thermo Database Loaded')
table_utils = TableUtilities()
##Lookup all unknown values that can be gotten from the Thermo Tables
if self.medium in ['Water', 'R134a']:
pass #Get this working
else:
if 'MM' in self.unknown:
self.MM = table_utils.TableDecider(['A1',['Molar Mass',self.medium,'N/A']])
if 'R' in self.unknown:
self.R = table_utils.TableDecider(['A1',['Gas Constant R',self.medium,'N/A']])
if 'Tcr' in self.unknown:
self.Tcr = table_utils.TableDecider(['A1',['Critical Temperature',self.medium,'N/A']])
if 'Pcr' in self.unknown:
self.Pcr = table_utils.TableDecider(['A1',['Critical Pressure',self.medium,'N/A']])
if 'Vcr' in self.unknown:
self.Vcr = table_utils.TableDecider(['A1',['Critical Volume',self.medium,'N/A']])
for element in ['MM','R','Tcr','Pcr','Vcr']:
if element in self.unknown:
del self.unknown[element] #Remove found values from the unknown list
self.known.update({element:getattr(self,element)}) #Add found values to the known list
self.interThermoPassed = False #This will be true once it has been able to do a full table lookup.
self.intermediateStep(self)
# elif self.subject == 'statics': #This is to show how to add another subject.
# from .logicStaticsEquations import LogicStaticsEquations
# self.eqnDatabase = LogicStaticsEquations.eqnDatabase(self)
##Search through that Database for the relevant equations
temp = self.unknownGoalSetup(self)
self.equationFinder(self,temp)
#Solve the equations
self.solver(self,self.equations) #Find any others that can be found
#Return your answer
#Have it go through the answers and return only the ones that we want.
def intermediateStep(self):
"""
This does things that must be checked between steps of Gauss-Seidel or the linear solver.
For thermo, this is a check if the values of the tables can be/have been found.
Once self.interThermoPassed == True, then this doesn't run any more, because all values have been gotten from the tables.
"""
if self.subject == 'thermo':
if self.interThermoPassed == False:
from .logicThermoTableLookup import TableUtilities  # Imported here because the import in __init__ is local to that method
table_utils = TableUtilities()
answer = table_utils.TableEnough(self.unknown, self.known, self.medium)
if answer != []: #There was enough
self.interThermoPassed = True
for element in answer:
setattr(self,element[0],element[1])
del self.unknown[element[0]]
self.known.update({element[0]:element[1]})
def eqnDatabaseContains(self,varsSearch,varsAfter):
"""
This simply searches for what equations in the database contain the given variables.
It then chooses the best one.
'varsSearch' is the variables to search for
'varsAfter' is the variables contained in the equation after the one before this new one.
"""
possibleEqns,possibleNames = {},[]
for var in varsSearch.items(): #Look at each variable in turn
# if var[1] != 0: #Don't waste time on the zero value variables (Remove this part?)
for eqn in self.eqnDatabase.items():
for after in varsAfter:
if (var[0] in eqn[1]) and (after in eqn[1]): #If it contains something I am looking for & a var from after
possibleEqns.update({eqn[0]:eqn[1]}) #Add that equation to possibleEqns
possibleNames.append(eqn[0])
if possibleEqns == {}: #There was nothing that fit the joint criteria above, just focus on the varsSearch.
for var in varsSearch.items(): #Look at each variable in turn
for eqn in self.eqnDatabase.items():
if var[0] in eqn[1]: #If it contains something I am looking for
possibleEqns.update({eqn[0]:eqn[1]}) #Add that equation to possibleEqns
possibleNames.append(eqn[0])
#Analyze each one.
countList = [[],[]]
self.varsAfter = varsAfter
for eqn in possibleEqns.items(): #Look at each possible equation in turn
nameList = ['unknown','varsAfter']
for i in range(2):
count = 0
for var in getattr(self,nameList[i]).keys(): #How many of each does it have?
if var in eqn[1]: count +=1
countList[i].append(count)
##Best: Has the most varsAfter. If no varsAfter: Has the least unknowns, but at least 2
indexList = [np.array(countList[0]).argmax(),np.array(countList[1]).argmin()]#Most varsAfter or least unknown
if countList[1][indexList[1]] > 0: self.myEqns.update({possibleNames[indexList[1]]:self.eqnDatabase[possibleNames[indexList[1]]]})
else: self.myEqns.update({possibleNames[indexList[0]]:self.eqnDatabase[possibleNames[indexList[0]]]})
def unknownGoalSetup(self):
"""
This creates a list of all the unknowns, with the goal tagged onto the end.
"""
temp = []
for item in self.unknown.items():
temp.append(item[0])
for item in self.goal.items():
temp.append(item[0])
return temp
def equationFinder(self,unknownList):
"""
This searches through the equation database for which equations can be used.
It then follows every pathway possible to get to the goal, and puts it in a dictionary.
The pathways that do not work are deleted from the dictionary.
It returns the equations with the variable you solve it for as a list that is in the order to be solved.
Limitations: This does not set up to solve 2 equations with 2 unknowns.
It sets up to solve 1 equation with 1 unknown, then another equation with 2 unknowns,
where one of the unknowns happens to be the same as the one that was just solved for.
"""
print('Searching the database')
n = (len(self.eqnDatabase))
col,row = -1,-1
earlyPaths = {} #These are the pathways that were completed before all unknowns were used up.
temp = {} #A dictionary of all the ways
wayCount = 0 #How many ways there are to solve it
myMatrix = [[],[[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]]
###This is still under development.
#Input
known = {'T1': 453.15, 'P1': 130.0, 'V1': 0.07, 'P2': 80.0}
unknown = {'MM': 'unknown', 'x1': 'unknown', 'Cp': 'unknown', 'u2': 'unknown', 's1': 'unknown', 'V2': 'unknown', 'x2': 'unknown', 'R': 'unknown', 's2': 'unknown', 'v1': '', 'Q': 'unknown', 'v2': 'unknown', 'Pcr': 'unknown', 'T2': 'unknown', 'Cv': 'unknown', 'k': 'unknown', 'Cavg': 'unknown', 'Tcr': 'unknown', 'm2': 'unknown', 'roe': 'unknown', 'u1': 'unknown', 'Vcr': 'unknown', 'm1': 'unknown'}
goal = {'W': 'unknown'}
n = (len(dataBase))
col,row = 0,0
earlyPaths = {} #These are the pathways that were completed before all unknowns were used up.
paths = {} #A dictionary of all the ways
wayCount = 0 #How many ways there are to solve it
myMatrix = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]
for endVar in goal.items(): #First, label which variable is for which column
for var in unknown.items():
myMatrix[0].append(var[0])
myMatrix[0].append(endVar[0])
for eqn in dataBase.items(): #Second, label which equation is for which row
row += 1
myMatrix[row].append(eqn[0])
for item in unknown.items(): #Fill in the 1 and 0
if item[0] in eqn[1]:
myMatrix[row].append(1)
else:
myMatrix[row].append(0)
row = 0
for eqn in myMatrix[1:]: #Third, have it search for the 1 step solutions & non-useable equations.
row += 1
print(eqn[1:])
if sum(eqn[1:]) == 0: #Weed out the eqns that are pure 0
print(' =0')
del myMatrix[row]
row -= 1
if (1 not in eqn[1:-1]) and (eqn[-1] == 1): #Delete all that contain only my goal & store them (which is the last one in the list. Always.)
print(' Only Goal')
wayCount += 1
col = np.argmax(np.array(eqn[1:])) + 1 #Get the position of the 1
temp = [myMatrix,myMatrix[row][0],myMatrix[0][col]] #[current myMatrix, eqn, var to solve for]
del myMatrix[row]
earlyPaths.update({'way'+str(wayCount):temp})
row -= 1
if sum(eqn[1:]) == 1:
print(' =1')
wayCount += 1
col = np.argmax(np.array(eqn[1:])) + 1 #Get the position of the 1
temp = [-1,myMatrix[row][0],myMatrix[0][col]] #[current myMatrix, eqn, var to solve for]
myMatrixTemp = copy.deepcopy(myMatrix)
del myMatrixTemp[row] #Delete the row
temp[0] = myMatrixTemp
for i in range(len(myMatrix)): #Delete the column
del myMatrix[i][col]
paths.update({'way'+str(wayCount):temp})
row -= 1
row, passedAlready = 0, False
print(stop)  # Deliberate development halt: 'stop' is undefined, so execution raises NameError here.
for k in range(len(unknown)-1):#Fourth. This loop will end just before only the goal var remains
for item in paths.items():
myMatrix = item[1][0][:]
for eqn in myMatrix[1:]: #Find the equations with 1 unknown.
row += 1
if sum(eqn[1:]) == 1:
col = np.argmax(np.array(eqn[1:])) + 1 #Get the position of the 1
temp[1].append(myMatrix[row][0]) #Add eqn
temp[2].append(myMatrix[0][col]) #Add var
del myMatrix[row] #Delete the row
for i in range(len(myMatrix)): #Delete the column
del myMatrix[i][col]
temp[0] = myMatrix #Update current myMatrix
row -= 1
if passedAlready == False:
paths.update({item[0]:temp})
else: #There is another way that branches off of this one.
wayCount += 1
paths.update({'way'+str(wayCount):temp})
passedAlready = True
print(myMatrix)
print('\n','early',earlyPaths)
print('\n','paths',paths)
print(stop)  # Deliberate development halt: 'stop' is undefined, so execution raises NameError here.
#{way1: [[eqnsLeft],[[varAvailable],[varToSolveFor]],[eqnsPathway]], way2: ...
#eqnsPathway: [[eqn1,var1],[eqn2,var2],...
def solver(self,eqns):
"""
This takes all the equations given to it and does either 'Linear Solve' or 'Gauss-Seidel' until it finds an answer.
If it diverges during Gauss-Seidel, it re-arranges the equations using sympy.
It saves all the variables to the class function.
If there are multiple ways to solve the problem, it chooses one at random, and if that doesn't work it deletes it.
It then tries the next way. Theoretically, any way that is provided it should work. This is just a precaution.
~~~ A future update could handle requests for alternative ways to solve it.
Note: This does not handle Gauss-Seidel equation sets yet.
~~~ For the goal, it would be nice if it returned (1) a rounded answer, and (2) in the units your goal is in.
"""
print('Solving')
print(self.equations)
for item in self.equations:
answer = self.ridder(self,item[0]+'Eqn',item[1])
setattr(self, item[1], answer[0])
if item != self.equations[-1]:
print('I used equation ',item[0],' and solved for ',item[1],'. The answer was ',answer[0],' with a percent error of ',answer[1])
else:
print('I used equation ',item[0],' and solved for ',item[1],', your goal. The answer was ',answer[0],' with a percent error of ',answer[1])
print('Thank You for using our program.')
def f(self,fn):
"""
This function simply runs a function and returns the answer.
"""
if self.subject == 'thermo':
from .logicThermoEquations import LogicThermoEquations  # Imported here because the import in __init__ is local to that method
return getattr(LogicThermoEquations, fn)(self)  # Look the equation up by name and evaluate it
# elif self.subject == 'statics':
#     from .logicStaticsEquations import LogicStaticsEquations
#     return getattr(LogicStaticsEquations, fn)(self)
def ridder(self,eq,var,guess=[-10*10**90,10*10**90],erdes=0.00001):
"""
Solves one equation for one unknown using Ridder's method.
'eq' is the equation to be solved.
'var' is the variable that is being solved for.
'guess' is the bounds which the answer is in.
'erdes' is the desired error
"""
x1, x2, erdes = guess[0], guess[1], erdes/100
n = math.ceil(math.log(abs(x1-x2)/erdes)/math.log(2))
for i in range(int(n)):
setattr(self, var, x1)
f1 = self.f(self, eq)
setattr(self, var, x2)
f2 = self.f(self, eq)
x3 = (x1+x2)/2
setattr(self, var, x3)
f3 = self.f(self, eq)
x4 = x3+(x3-x1)*np.sign(f1-f2)*f3/math.sqrt(f3**2-f1*f2)
setattr(self, var, x4)
f4 = self.f(self, eq)
if f3*f4<0: x1,x2 = x3,x4
elif f1*f4<0: x2 = x4
elif f2*f4<0: x1 = x4
else: break
error = abs((x1-x2)/x2)*100
return x4,error
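# Hedged, self-contained sketch of Ridder's method (hypothetical helper, separate from the
# class method above): given a bracketing interval [x1, x2] with func(x1)*func(x2) < 0, it
# iterates the same midpoint/exponential-correction update the ridder() method uses.
def _example_ridder(func, x1, x2, tol=1e-10, max_iter=60):
    f1, f2 = func(x1), func(x2)
    x4 = 0.5 * (x1 + x2)
    for _ in range(max_iter):
        x3 = 0.5 * (x1 + x2)
        f3 = func(x3)
        denom = math.sqrt(f3 * f3 - f1 * f2)
        if denom == 0.0:
            return x3
        x4 = x3 + (x3 - x1) * (1 if f1 > f2 else -1) * f3 / denom
        f4 = func(x4)
        # Re-bracket the root using the sign of f4.
        if f3 * f4 < 0:
            x1, f1, x2, f2 = x3, f3, x4, f4
        elif f1 * f4 < 0:
            x2, f2 = x4, f4
        else:
            x1, f1 = x4, f4
        if abs(x2 - x1) < tol:
            break
    return x4

# _example_ridder(lambda x: x * x - 4.0, 0.0, 10.0) -> approximately 2.0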
def gauss(self,eqns,erdes=0.00001):
"""
'eqns' is the equation names. [eq1,eq2,eq3]. They are solved in that order.
'erdes' is the desired error.
Example Input: gauss([f1,f2],0.01)
"""
noFun = len(eqns)
var = [3.14688]*noFun
varNew = var[:]
error = [10000]*noFun
varDif=[3.14688]*noFun
nHistory =[]
count = 0
while max(error) > erdes:
for i in range(noFun): varNew[i] = self.f(self, eqns[i]) #Solve each function in turn (gauss is still incomplete)
for i in range(noFun): error[i],varDif[i] = abs((varNew[i]-var[i])/varNew[i]),(abs(varNew[i]-var[i])/2)
for i in range(noFun):
if varDif[i]==max(varDif):
n=i
nHistory.append(varNew[n])
count,var = count + 1,copy.deepcopy(varNew)
if count == 10: #This must always be an even number. It hasn't begun to converge within 10 iterations, so is it diverging?
var = [3.14688]*noFun
varNew = var[:]
halfLength = len(nHistory)/2
firstHalf = 0
secondHalf = 0
for i in range(int(halfLength)):
firstHalf = firstHalf + nHistory[i]
secondHalf = secondHalf + nHistory[i+int(halfLength)]
half1 = firstHalf/halfLength
half2 = secondHalf/halfLength
if abs(half1) < abs(half2):
print('This function Diverges. Re-do.')
sys.exit(0)
# else:
# print('It converges. I shall continue.')
return varNew,error
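# Hedged, self-contained sketch of the Gauss-Seidel idea the gauss() docstring describes
# (hypothetical helper, separate from the class): two coupled equations solved by fixed-point
# iteration, each variable updated in turn using the most recent value of the other.
def _example_gauss_seidel(max_iter=100, tol=1e-10):
    # Solve x = (y + 1) / 2 and y = (x + 2) / 3; the exact solution is x = 1, y = 1.
    x, y = 0.0, 0.0
    for _ in range(max_iter):
        x_new = (y + 1.0) / 2.0
        y_new = (x_new + 2.0) / 3.0
        converged = abs(x_new - x) < tol and abs(y_new - y) < tol
        x, y = x_new, y_new
        if converged:
            break
    return x, y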
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2021) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import mock
import pytest
from hpe_test_utils import OneViewBaseTest
from oneview_module_loader import IdPoolsIpv4SubnetModule
FAKE_MSG_ERROR = 'Fake message error'
DEFAULT_SUBNET_TEMPLATE = dict(
name='Ipv4Subnet',
uri='/rest/subnet/test',
type='Subnet',
networkId='10.1.0.0',
domain='example.com'
)
PARAMS_FOR_PRESENT = dict(
config='config.json',
state='present',
data=dict(networkId=DEFAULT_SUBNET_TEMPLATE['networkId'])
)
PARAMS_FOR_PRESENT_WITH_URI = dict(
config='config.json',
state='present',
data=dict(uri=DEFAULT_SUBNET_TEMPLATE['uri'])
)
PARAMS_FOR_INVALID = dict(
config='config.json',
state='present',
data=dict(type=DEFAULT_SUBNET_TEMPLATE['type'])
)
PARAMS_WITH_CHANGES = dict(
config='config.json',
state='present',
data=dict(networkId=DEFAULT_SUBNET_TEMPLATE['networkId'],
domain='newdomain.com')
)
PARAMS_FOR_ABSENT = dict(
config='config.json',
state='absent',
data=dict(networkId=DEFAULT_SUBNET_TEMPLATE['networkId'])
)
PARAMS_FOR_COLLECT = dict(
config='config.json',
state='collect',
data=dict(networkId=DEFAULT_SUBNET_TEMPLATE['networkId'],
idList=['10.1.1.1', '10.1.1.2'])
)
PARAMS_FOR_ALLOCATE = dict(
config='config.json',
state='allocate',
data=dict(networkId=DEFAULT_SUBNET_TEMPLATE['networkId'],
count=2)
)
@pytest.mark.resource(TestIdPoolsIpv4SubnetModule='id_pools_ipv4_subnets')
class TestIdPoolsIpv4SubnetModule(OneViewBaseTest):
"""
OneViewBaseTest provides the mocks used in this test case
"""
def test_should_create_new_id_pools_ipv4_subnet(self):
self.resource.get_by_field.return_value = None
self.resource.create.return_value = self.resource
self.resource.data = DEFAULT_SUBNET_TEMPLATE
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
IdPoolsIpv4SubnetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=IdPoolsIpv4SubnetModule.MSG_CREATED,
ansible_facts=dict(id_pools_ipv4_subnet=DEFAULT_SUBNET_TEMPLATE)
)
def test_should_not_update_when_data_is_equals(self):
self.resource.data = DEFAULT_SUBNET_TEMPLATE
self.resource.get_by_field.return_value = self.resource
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
IdPoolsIpv4SubnetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=IdPoolsIpv4SubnetModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(id_pools_ipv4_subnet=DEFAULT_SUBNET_TEMPLATE)
)
def test_should_get_the_same_resource_by_networkid(self):
self.resource.data = DEFAULT_SUBNET_TEMPLATE
self.resource.get_by_field.return_value = self.resource
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
IdPoolsIpv4SubnetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=IdPoolsIpv4SubnetModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(id_pools_ipv4_subnet=DEFAULT_SUBNET_TEMPLATE)
)
def test_should_get_the_same_resource_by_uri(self):
self.resource.data = DEFAULT_SUBNET_TEMPLATE
self.resource.get_by_uri.return_value = self.resource
self.mock_ansible_module.params = PARAMS_FOR_PRESENT_WITH_URI
IdPoolsIpv4SubnetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=IdPoolsIpv4SubnetModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(id_pools_ipv4_subnet=DEFAULT_SUBNET_TEMPLATE)
)
def test_update_when_data_has_modified_attributes(self):
data_merged = DEFAULT_SUBNET_TEMPLATE.copy()
data_merged['domain'] = 'diffdomain.com'
self.resource.data = data_merged
self.resource.get_by_field.return_value = self.resource
self.resource.update.return_value = data_merged
self.mock_ansible_module.params = PARAMS_WITH_CHANGES
IdPoolsIpv4SubnetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=IdPoolsIpv4SubnetModule.MSG_UPDATED,
ansible_facts=dict(id_pools_ipv4_subnet=data_merged)
)
def test_should_allocate_when_valid_ids_present(self):
data_merged = DEFAULT_SUBNET_TEMPLATE.copy()
data_merged['count'] = 2
data_merged['allocatorUri'] = '/rest/fake'
self.resource.data = data_merged
self.resource.get_by_field.return_value = self.resource
self.resource.allocate.return_value = {'idList': ['172.9.0.1', '172.9.0.2']}
self.mock_ansible_module.params = PARAMS_FOR_ALLOCATE
IdPoolsIpv4SubnetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=IdPoolsIpv4SubnetModule.MSG_ALLOCATE,
ansible_facts=dict(id_pools_ipv4_subnet={'idList': ['172.9.0.1', '172.9.0.2']})
)
def test_should_collect_when_valid_ids_allocated(self):
data_merged = DEFAULT_SUBNET_TEMPLATE.copy()
data_merged['idList'] = ['10.1.1.1', '10.1.1.2']
data_merged['allocatorUri'] = '/rest/fake'
self.resource.data = data_merged
self.resource.get_by_field.return_value = self.resource
self.resource.collect.return_value = {'idList': ['10.1.1.1', '10.1.1.1']}
self.mock_ansible_module.params = PARAMS_FOR_COLLECT
IdPoolsIpv4SubnetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=IdPoolsIpv4SubnetModule.MSG_COLLECT,
ansible_facts=dict(id_pools_ipv4_subnet={'idList': ['10.1.1.1', '10.1.1.1']})
)
def test_should_remove_id_pools_ipv4_subnet(self):
self.resource.data = DEFAULT_SUBNET_TEMPLATE
self.resource.get_by_field.return_value = self.resource
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
IdPoolsIpv4SubnetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=IdPoolsIpv4SubnetModule.MSG_DELETED
)
def test_should_do_nothing_when_id_pools_ipv4_subnet_not_exist(self):
self.resource.get_by_field.return_value = None
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
IdPoolsIpv4SubnetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=IdPoolsIpv4SubnetModule.MSG_ALREADY_ABSENT
)
if __name__ == '__main__':
pytest.main([__file__])
|
|
__author__ = 'tdp'
from Decode.utils import isZeroBit
from Execute.utils import *
import operator as op
CPSR_Mask = 0b11111000111111110000001111011111
APSR_Mask = 0b11111000000111100000000000000000
class StandardInstructions:
def __init__(self, registers, process_mode, memory):
self.registers = registers
self.process_mode = process_mode
self.memory = memory
def __getitem__(self, item):
return getattr(self, item)
def _imediate_template(self, args, operator):
pass
def _register_template(self, args, operator):
if args['encoding'] == 'A1-R':
'''S, Rn, Rd, imm5, type, Rm'''
try:
result = operator(self.registers[args['Rn']], decode_immediate_shift(args, self.process_mode.CPSR, self.registers))
carry = getbit(self.registers[args['Rn']], args['imm5'])
except NotShifted:
result = operator(self.registers[args['Rn']], self.registers[args['Rm']])
carry = 0
if args['S']:
self.process_mode.CPSR.negative_flag = getbit(result, 31)
self.process_mode.CPSR.zero_flag = isZeroBit(result)
self.process_mode.CPSR.carry_flag = carry
self.registers[args['Rd']] = result
def _register_shifted_template(self, args, operator):
pass
def ADC(self, args):
""" Add with Carry """
raise NotImplementedError
def ADD(self, args):
""" Add """
raise NotImplementedError
def ADR(self, args):
"""Add an immediate value with PC value """
raise NotImplementedError
def AND(self, args):
""" AND """
if args['encoding'] == 'A1-R':
'''S, Rn, Rd, imm5, type, Rm'''
self._register_template(args, op.and_)
if args['encoding'] == 'A1-RSR':
raise NotImplementedError
if args['encoding'] == 'A1-I':
raise NotImplementedError
if args['encoding'] == 'T1-R':
raise NotImplementedError
if args['encoding'] == 'T2-R':
raise NotImplementedError
if args['encoding'] == 'T1-I':
raise NotImplementedError
def BIC(self, args):
""" Bitwise Bit Clear """
raise NotImplementedError
def CMN(self, args):
""" Compare Negative """
raise NotImplementedError
def CMP(self, args):
""" Compare """
raise NotImplementedError
def EOR(self, args):
""" Exclusive OR """
if args['encoding'] == 'A1-I':
raise NotImplementedError
if args['encoding'] == 'A1-R':
self._register_template(args, op.xor)
if args['encoding'] == 'A1-RSR':
raise NotImplementedError
if args['encoding'] == 'T1-I':
raise NotImplementedError
if args['encoding'] == 'T1-R':
raise NotImplementedError
if args['encoding'] == 'T2-R':
raise NotImplementedError
def MOV(self, args):
""" Move """
if args['encoding'] == 'A1-I':
'''S, Rd, imm12'''
raise NotImplementedError
if args['encoding'] == 'A1-R':
Rm = self.registers[args['Rm']]
self.registers[args['Rd']] = Rm
self.process_mode.CPSR.negative_flag = Rm >> 31
self.process_mode.CPSR.zero_flag = isZeroBit(Rm)
def MVN(self, args):
""" Bitwise Not """
raise NotImplementedError
def ORN(self, args):
""" Bitwise OR NOT """
raise NotImplementedError
def ORR(self, args):
""" Bitwise OR """
raise NotImplementedError
def RSB(self, args):
""" Reverse Subtract """
raise NotImplementedError
def RSC(self, args):
""" Reverse Subtract with Carry """
raise NotImplementedError
def SBC(self, args):
""" Subtract with Carry """
raise NotImplementedError
def SUB(self, args):
""" Subtract """
raise NotImplementedError
def TEQ(self, args):
""" Test Equivalence """
raise NotImplementedError
def TST(self, args):
""" Test """
if args['encoding'] == 'A1-I':
# Rn, imm12
imm32, carry = arm_expand_immediate(args['imm12'], self.process_mode.CPSR.carry_flag)
result = self.registers[args['Rn']] & imm32
self.process_mode.CPSR.negative_flag = getbit(result, 31)
self.process_mode.CPSR.zero_flag = isZeroBit(result)
self.process_mode.CPSR.carry_flag = carry
if args['encoding'] == 'T1-I':
raise NotImplementedError
if args['encoding'] == 'A1-R':
raise NotImplementedError
if args['encoding'] == 'T1-R':
raise NotImplementedError
if args['encoding'] == 'T2-R':
raise NotImplementedError
if args['encoding'] == 'A1-RSR':
raise NotImplementedError
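# --- Illustrative sketch (not part of the original module) ---
# TST (encoding A1-I) above relies on arm_expand_immediate() imported from
# Execute.utils, which is not shown in this file. For reference, this is the
# standard A32 "modified immediate" expansion (ARMExpandImm_C in the ARM ARM);
# the project's actual helper may differ in name, signature, or details.

def _arm_expand_imm_sketch(imm12, carry_in):
    """Expand a 12-bit modified immediate into (imm32, carry_out)."""
    value = imm12 & 0xFF                    # low 8 bits: the unrotated constant
    rotation = 2 * ((imm12 >> 8) & 0xF)     # top 4 bits give the rotate-right amount, doubled
    if rotation == 0:
        return value, carry_in              # carry is unchanged when there is no rotation
    imm32 = ((value >> rotation) | (value << (32 - rotation))) & 0xFFFFFFFF
    return imm32, (imm32 >> 31) & 1         # carry_out is bit 31 of the rotated result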
class ShiftInstructions:
def __init__(self, registers, process_mode, memory):
self.registers = registers
self.process_mode = process_mode
self.memory = memory
def __getitem__(self, item):
return getattr(self, item)
def ASR(self, args):
""" Arithmetic Shift Right """
raise NotImplementedError
def LSL(self, args):
""" Logical Shift Left """
raise NotImplementedError
def LSR(self, args):
""" Logical Shift Right """
raise NotImplementedError
def ROR(self, args):
""" Rotate Right """
raise NotImplementedError
def RRX(self, args):
""" Rotate Right with Extend """
raise NotImplementedError
class MultiplyInstructions:
def __init__(self, registers, process_mode, memory):
self.registers = registers
self.process_mode = process_mode
self.memory = memory
def __getitem__(self, item):
return getattr(self, item)
class SaturatingInstructions:
def __init__(self, registers, process_mode, memory):
self.registers = registers
self.process_mode = process_mode
self.memory = memory
def __getitem__(self, item):
return getattr(self, item)
class SaturatingAddSubInstructions:
def __init__(self, registers, process_mode, memory):
self.registers = registers
self.process_mode = process_mode
self.memory = memory
def __getitem__(self, item):
return getattr(self, item)
class PackingUnpackingInstructions:
def __init__(self, registers, process_mode, memory):
self.registers = registers
self.process_mode = process_mode
self.memory = memory
def __getitem__(self, item):
return getattr(self, item)
class ParallelAddSubInstructions:
def __init__(self, registers, process_mode, memory):
self.registers = registers
self.process_mode = process_mode
self.memory = memory
def __getitem__(self, item):
return getattr(self, item)
class DivideInstructions:
def __init__(self, registers, process_mode, memory):
self.registers = registers
self.process_mode = process_mode
self.memory = memory
def __getitem__(self, item):
return getattr(self, item)
class MiscInstructions:
def __init__(self, registers, process_mode, memory):
self.registers = registers
self.process_mode = process_mode
self.memory = memory
self.privilege_level = process_mode.current_mode.privilege_level
def __getitem__(self, item):
return getattr(self, item)
def MRS(self, args):
""" Move to Register from Special register """
if args['encoding'] == 'A1':
if self.privilege_level > 0: # Execute as system level
if args['R'] == 1: # read SPSR
self.registers[args['Rd']] = int(self.process_mode.current_mode.SPSR)
if args['R'] == 0:
# CPSR is read with execution state bits other than E masked out.
self.registers[args['Rd']] = int(self.process_mode.CPSR) & CPSR_Mask
else: # Execute as application level
self.registers[args['Rd']] = int(self.process_mode.CPSR) & APSR_Mask
if args['encoding'] == 'A1-BR':
raise NotImplementedError
if args['encoding'] == 'T1':
raise NotImplementedError
if args['encoding'] == 'T1-BR':
raise NotImplementedError
|
|
from __future__ import unicode_literals
import datetime
from decimal import Decimal
import unittest
import warnings
from django import test
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection, transaction, models, IntegrityError
from django.db.models.fields import (
AutoField, BigIntegerField, BinaryField, BooleanField, CharField,
CommaSeparatedIntegerField, DateField, DateTimeField, DecimalField,
EmailField, FilePathField, FloatField, IntegerField, IPAddressField,
GenericIPAddressField, NOT_PROVIDED, NullBooleanField, PositiveIntegerField,
PositiveSmallIntegerField, SlugField, SmallIntegerField, TextField,
TimeField, URLField)
from django.db.models.fields.files import FileField, ImageField
from django.utils import six
from django.utils.functional import lazy
from .models import (
Foo, Bar, Whiz, BigD, BigS, BigIntegerModel, Post, NullBooleanModel,
BooleanModel, PrimaryKeyCharModel, DataModel, Document, RenamedField,
DateTimeModel, VerboseNameField, FksToBooleans, FkToChar, FloatModel,
SmallIntegerModel, IntegerModel, PositiveSmallIntegerModel, PositiveIntegerModel)
class BasicFieldTests(test.TestCase):
def test_show_hidden_initial(self):
"""
Regression test for #12913. Make sure fields with choices respect
show_hidden_initial as a kwarg to models.Field.formfield()
"""
choices = [(0, 0), (1, 1)]
model_field = models.Field(choices=choices)
form_field = model_field.formfield(show_hidden_initial=True)
self.assertTrue(form_field.show_hidden_initial)
form_field = model_field.formfield(show_hidden_initial=False)
self.assertFalse(form_field.show_hidden_initial)
def test_nullbooleanfield_blank(self):
"""
Regression test for #13071: NullBooleanField should not throw
a validation error when given a value of None.
"""
nullboolean = NullBooleanModel(nbfield=None)
try:
nullboolean.full_clean()
except ValidationError as e:
self.fail("NullBooleanField failed validation with value of None: %s" % e.messages)
def test_field_repr(self):
"""
Regression test for #5931: __repr__ of a field also displays its name
"""
f = Foo._meta.get_field('a')
self.assertEqual(repr(f), '<django.db.models.fields.CharField: a>')
f = models.fields.CharField()
self.assertEqual(repr(f), '<django.db.models.fields.CharField>')
def test_field_name(self):
"""
Regression test for #14695: explicitly defined field name overwritten
by model's attribute name.
"""
instance = RenamedField()
self.assertTrue(hasattr(instance, 'get_fieldname_display'))
self.assertFalse(hasattr(instance, 'get_modelname_display'))
def test_field_verbose_name(self):
m = VerboseNameField
for i in range(1, 23):
self.assertEqual(m._meta.get_field('field%d' % i).verbose_name,
'verbose field%d' % i)
self.assertEqual(m._meta.get_field('id').verbose_name, 'verbose pk')
def test_float_validates_object(self):
instance = FloatModel(size=2.5)
# Try setting float field to unsaved object
instance.size = instance
with transaction.atomic():
with self.assertRaises(TypeError):
instance.save()
# Set value to valid and save
instance.size = 2.5
instance.save()
self.assertTrue(instance.id)
# Set field to object on saved instance
instance.size = instance
with transaction.atomic():
with self.assertRaises(TypeError):
instance.save()
# Try setting field to object on retrieved object
obj = FloatModel.objects.get(pk=instance.id)
obj.size = obj
with self.assertRaises(TypeError):
obj.save()
def test_choices_form_class(self):
"""Can supply a custom choices form class. Regression for #20999."""
choices = [('a', 'a')]
field = models.CharField(choices=choices)
klass = forms.TypedMultipleChoiceField
self.assertIsInstance(field.formfield(choices_form_class=klass), klass)
def test_field_str(self):
from django.utils.encoding import force_str
f = Foo._meta.get_field('a')
self.assertEqual(force_str(f), "model_fields.Foo.a")
class DecimalFieldTests(test.TestCase):
def test_to_python(self):
f = models.DecimalField(max_digits=4, decimal_places=2)
self.assertEqual(f.to_python(3), Decimal("3"))
self.assertEqual(f.to_python("3.14"), Decimal("3.14"))
self.assertRaises(ValidationError, f.to_python, "abc")
def test_default(self):
f = models.DecimalField(default=Decimal("0.00"))
self.assertEqual(f.get_default(), Decimal("0.00"))
def test_format(self):
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertEqual(f._format(f.to_python(2)), '2.0')
self.assertEqual(f._format(f.to_python('2.6')), '2.6')
self.assertEqual(f._format(None), None)
def test_get_db_prep_lookup(self):
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
def test_filter_with_strings(self):
"""
We should be able to filter decimal fields using strings (#8023)
"""
Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
self.assertEqual(list(Foo.objects.filter(d='1.23')), [])
def test_save_without_float_conversion(self):
"""
Ensure decimals don't go through a corrupting float conversion during
save (#5079).
"""
bd = BigD(d="12.9")
bd.save()
bd = BigD.objects.get(pk=bd.pk)
self.assertEqual(bd.d, Decimal("12.9"))
def test_lookup_really_big_value(self):
"""
Ensure that really big values can be used in a filter statement, even
with older Python versions.
"""
# This should not crash. That counts as a win for our purposes.
Foo.objects.filter(d__gte=100000000000)
class ForeignKeyTests(test.TestCase):
def test_callable_default(self):
"""Test the use of a lazy callable for ForeignKey.default"""
a = Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
b = Bar.objects.create(b="bcd")
self.assertEqual(b.a, a)
@test.skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_empty_string_fk(self):
"""
Test that foreign key values to empty strings don't get converted
to None (#19299)
"""
char_model_empty = PrimaryKeyCharModel.objects.create(string='')
fk_model_empty = FkToChar.objects.create(out=char_model_empty)
fk_model_empty = FkToChar.objects.select_related('out').get(id=fk_model_empty.pk)
self.assertEqual(fk_model_empty.out, char_model_empty)
class DateTimeFieldTests(unittest.TestCase):
def test_datetimefield_to_python_usecs(self):
"""DateTimeField.to_python should support usecs"""
f = models.DateTimeField()
self.assertEqual(f.to_python('2001-01-02 03:04:05.000006'),
datetime.datetime(2001, 1, 2, 3, 4, 5, 6))
self.assertEqual(f.to_python('2001-01-02 03:04:05.999999'),
datetime.datetime(2001, 1, 2, 3, 4, 5, 999999))
def test_timefield_to_python_usecs(self):
"""TimeField.to_python should support usecs"""
f = models.TimeField()
self.assertEqual(f.to_python('01:02:03.000004'),
datetime.time(1, 2, 3, 4))
self.assertEqual(f.to_python('01:02:03.999999'),
datetime.time(1, 2, 3, 999999))
@test.skipUnlessDBFeature("supports_microsecond_precision")
def test_datetimes_save_completely(self):
dat = datetime.date(2014, 3, 12)
datetim = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
tim = datetime.time(21, 22, 23, 240000)
DateTimeModel.objects.create(d=dat, dt=datetim, t=tim)
obj = DateTimeModel.objects.first()
self.assertTrue(obj)
self.assertEqual(obj.d, dat)
self.assertEqual(obj.dt, datetim)
self.assertEqual(obj.t, tim)
class BooleanFieldTests(unittest.TestCase):
def _test_get_db_prep_lookup(self, f):
self.assertEqual(f.get_db_prep_lookup('exact', True, connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', '1', connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', 1, connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', False, connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', '0', connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', 0, connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
def _test_to_python(self, f):
self.assertTrue(f.to_python(1) is True)
self.assertTrue(f.to_python(0) is False)
def test_booleanfield_get_db_prep_lookup(self):
self._test_get_db_prep_lookup(models.BooleanField())
def test_nullbooleanfield_get_db_prep_lookup(self):
self._test_get_db_prep_lookup(models.NullBooleanField())
def test_booleanfield_to_python(self):
self._test_to_python(models.BooleanField())
def test_nullbooleanfield_to_python(self):
self._test_to_python(models.NullBooleanField())
def test_charfield_textfield_max_length_passed_to_formfield(self):
"""
Test that CharField and TextField pass their max_length attributes to
form fields created using their .formfield() method (#22206).
"""
cf1 = models.CharField()
cf2 = models.CharField(max_length=1234)
self.assertIsNone(cf1.formfield().max_length)
self.assertEqual(1234, cf2.formfield().max_length)
tf1 = models.TextField()
tf2 = models.TextField(max_length=2345)
self.assertIsNone(tf1.formfield().max_length)
self.assertEqual(2345, tf2.formfield().max_length)
def test_booleanfield_choices_blank(self):
"""
Test that BooleanField with choices and defaults doesn't generate a
formfield with the blank option (#9640, #10549).
"""
choices = [(1, 'Si'), (2, 'No')]
f = models.BooleanField(choices=choices, default=1, null=True)
self.assertEqual(f.formfield().choices, [('', '---------')] + choices)
f = models.BooleanField(choices=choices, default=1, null=False)
self.assertEqual(f.formfield().choices, choices)
def test_return_type(self):
b = BooleanModel()
b.bfield = True
b.save()
b2 = BooleanModel.objects.get(pk=b.pk)
self.assertIsInstance(b2.bfield, bool)
self.assertEqual(b2.bfield, True)
b3 = BooleanModel()
b3.bfield = False
b3.save()
b4 = BooleanModel.objects.get(pk=b3.pk)
self.assertIsInstance(b4.bfield, bool)
self.assertEqual(b4.bfield, False)
b = NullBooleanModel()
b.nbfield = True
b.save()
b2 = NullBooleanModel.objects.get(pk=b.pk)
self.assertIsInstance(b2.nbfield, bool)
self.assertEqual(b2.nbfield, True)
b3 = NullBooleanModel()
b3.nbfield = False
b3.save()
b4 = NullBooleanModel.objects.get(pk=b3.pk)
self.assertIsInstance(b4.nbfield, bool)
self.assertEqual(b4.nbfield, False)
# http://code.djangoproject.com/ticket/13293
# Verify that when an extra clause exists, the boolean
# conversions are applied with an offset
b5 = BooleanModel.objects.all().extra(
select={'string_col': 'string'})[0]
self.assertNotIsInstance(b5.pk, bool)
def test_select_related(self):
"""
Test type of boolean fields when retrieved via select_related() (MySQL,
#15040)
"""
bmt = BooleanModel.objects.create(bfield=True)
bmf = BooleanModel.objects.create(bfield=False)
nbmt = NullBooleanModel.objects.create(nbfield=True)
nbmf = NullBooleanModel.objects.create(nbfield=False)
m1 = FksToBooleans.objects.create(bf=bmt, nbf=nbmt)
m2 = FksToBooleans.objects.create(bf=bmf, nbf=nbmf)
# Test select_related('fk_field_name')
ma = FksToBooleans.objects.select_related('bf').get(pk=m1.id)
# verify types -- shouldn't be 0/1
self.assertIsInstance(ma.bf.bfield, bool)
self.assertIsInstance(ma.nbf.nbfield, bool)
# verify values
self.assertEqual(ma.bf.bfield, True)
self.assertEqual(ma.nbf.nbfield, True)
# Test select_related()
mb = FksToBooleans.objects.select_related().get(pk=m1.id)
mc = FksToBooleans.objects.select_related().get(pk=m2.id)
# verify types -- shouldn't be 0/1
self.assertIsInstance(mb.bf.bfield, bool)
self.assertIsInstance(mb.nbf.nbfield, bool)
self.assertIsInstance(mc.bf.bfield, bool)
self.assertIsInstance(mc.nbf.nbfield, bool)
# verify values
self.assertEqual(mb.bf.bfield, True)
self.assertEqual(mb.nbf.nbfield, True)
self.assertEqual(mc.bf.bfield, False)
self.assertEqual(mc.nbf.nbfield, False)
def test_null_default(self):
"""
Check that a BooleanField defaults to None -- which isn't
a valid value (#15124).
"""
# Patch the boolean field's default value. We give it a default
# value when defining the model to satisfy the check tests
# #20895.
boolean_field = BooleanModel._meta.get_field('bfield')
self.assertTrue(boolean_field.has_default())
old_default = boolean_field.default
try:
boolean_field.default = NOT_PROVIDED
# check patch was successful
self.assertFalse(boolean_field.has_default())
b = BooleanModel()
self.assertIsNone(b.bfield)
with self.assertRaises(IntegrityError):
b.save()
finally:
boolean_field.default = old_default
nb = NullBooleanModel()
self.assertIsNone(nb.nbfield)
nb.save() # no error
class ChoicesTests(test.TestCase):
def test_choices_and_field_display(self):
"""
Check that get_choices and get_flatchoices interact with
get_FIELD_display to return the expected values (#7913).
"""
self.assertEqual(Whiz(c=1).get_c_display(), 'First') # A nested value
self.assertEqual(Whiz(c=0).get_c_display(), 'Other') # A top level value
self.assertEqual(Whiz(c=9).get_c_display(), 9) # Invalid value
self.assertEqual(Whiz(c=None).get_c_display(), None) # Blank value
self.assertEqual(Whiz(c='').get_c_display(), '') # Empty value
class SlugFieldTests(test.TestCase):
def test_slugfield_max_length(self):
"""
Make sure SlugField honors max_length (#9706)
"""
bs = BigS.objects.create(s='slug' * 50)
bs = BigS.objects.get(pk=bs.pk)
self.assertEqual(bs.s, 'slug' * 50)
class ValidationTest(test.TestCase):
def test_charfield_raises_error_on_empty_string(self):
f = models.CharField()
self.assertRaises(ValidationError, f.clean, "", None)
def test_charfield_cleans_empty_string_when_blank_true(self):
f = models.CharField(blank=True)
self.assertEqual('', f.clean('', None))
def test_integerfield_cleans_valid_string(self):
f = models.IntegerField()
self.assertEqual(2, f.clean('2', None))
def test_integerfield_raises_error_on_invalid_input(self):
f = models.IntegerField()
self.assertRaises(ValidationError, f.clean, "a", None)
def test_charfield_with_choices_cleans_valid_choice(self):
f = models.CharField(max_length=1,
choices=[('a', 'A'), ('b', 'B')])
self.assertEqual('a', f.clean('a', None))
def test_charfield_with_choices_raises_error_on_invalid_choice(self):
f = models.CharField(choices=[('a', 'A'), ('b', 'B')])
self.assertRaises(ValidationError, f.clean, "not a", None)
def test_charfield_get_choices_with_blank_defined(self):
f = models.CharField(choices=[('', '<><>'), ('a', 'A')])
self.assertEqual(f.get_choices(True), [('', '<><>'), ('a', 'A')])
def test_choices_validation_supports_named_groups(self):
f = models.IntegerField(
choices=(('group', ((10, 'A'), (20, 'B'))), (30, 'C')))
self.assertEqual(10, f.clean(10, None))
def test_nullable_integerfield_raises_error_with_blank_false(self):
f = models.IntegerField(null=True, blank=False)
self.assertRaises(ValidationError, f.clean, None, None)
def test_nullable_integerfield_cleans_none_on_null_and_blank_true(self):
f = models.IntegerField(null=True, blank=True)
self.assertEqual(None, f.clean(None, None))
def test_integerfield_raises_error_on_empty_input(self):
f = models.IntegerField(null=False)
self.assertRaises(ValidationError, f.clean, None, None)
self.assertRaises(ValidationError, f.clean, '', None)
def test_integerfield_validates_zero_against_choices(self):
f = models.IntegerField(choices=((1, 1),))
self.assertRaises(ValidationError, f.clean, '0', None)
def test_charfield_raises_error_on_empty_input(self):
f = models.CharField(null=False)
self.assertRaises(ValidationError, f.clean, None, None)
def test_datefield_cleans_date(self):
f = models.DateField()
self.assertEqual(datetime.date(2008, 10, 10), f.clean('2008-10-10', None))
def test_boolean_field_doesnt_accept_empty_input(self):
f = models.BooleanField()
self.assertRaises(ValidationError, f.clean, None, None)
class IntegerFieldTests(test.TestCase):
model = IntegerModel
documented_range = (-2147483648, 2147483647)
def test_documented_range(self):
"""
Ensure that values within the documented safe range pass validation
and can be saved and retrieved without corruption.
"""
min_value, max_value = self.documented_range
instance = self.model(value=min_value)
instance.full_clean()
instance.save()
qs = self.model.objects.filter(value__lte=min_value)
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].value, min_value)
instance = self.model(value=max_value)
instance.full_clean()
instance.save()
qs = self.model.objects.filter(value__gte=max_value)
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].value, max_value)
def test_backend_range_validation(self):
"""
Ensure that backend-specific ranges are enforced at the model
validation level (ref #12030).
"""
field = self.model._meta.get_field('value')
internal_type = field.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is not None:
instance = self.model(value=min_value - 1)
expected_message = validators.MinValueValidator.message % {
'limit_value': min_value
}
with self.assertRaisesMessage(ValidationError, expected_message):
instance.full_clean()
instance.value = min_value
instance.full_clean()
if max_value is not None:
instance = self.model(value=max_value + 1)
expected_message = validators.MaxValueValidator.message % {
'limit_value': max_value
}
with self.assertRaisesMessage(ValidationError, expected_message):
instance.full_clean()
instance.value = max_value
instance.full_clean()
def test_types(self):
instance = self.model(value=0)
self.assertIsInstance(instance.value, six.integer_types)
instance.save()
self.assertIsInstance(instance.value, six.integer_types)
instance = self.model.objects.get()
self.assertIsInstance(instance.value, six.integer_types)
def test_coercing(self):
self.model.objects.create(value='10')
instance = self.model.objects.get(value='10')
self.assertEqual(instance.value, 10)
class SmallIntegerFieldTests(IntegerFieldTests):
model = SmallIntegerModel
documented_range = (-32768, 32767)
class BigIntegerFieldTests(IntegerFieldTests):
model = BigIntegerModel
documented_range = (-9223372036854775808, 9223372036854775807)
class PositiveSmallIntegerFieldTests(IntegerFieldTests):
model = PositiveSmallIntegerModel
documented_range = (0, 32767)
class PositiveIntegerFieldTests(IntegerFieldTests):
model = PositiveIntegerModel
documented_range = (0, 2147483647)
class TypeCoercionTests(test.TestCase):
"""
Test that database lookups can accept the wrong types and convert
them with no error: especially on Postgres 8.3+ which does not do
automatic casting at the DB level. See #10015.
"""
def test_lookup_integer_in_charfield(self):
self.assertEqual(Post.objects.filter(title=9).count(), 0)
def test_lookup_integer_in_textfield(self):
self.assertEqual(Post.objects.filter(body=24).count(), 0)
class FileFieldTests(unittest.TestCase):
def test_clearable(self):
"""
Test that FileField.save_form_data will clear its instance attribute
value if passed False.
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, False)
self.assertEqual(d.myfile, '')
def test_unchanged(self):
"""
Test that FileField.save_form_data considers None to mean "no change"
rather than "clear".
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, None)
self.assertEqual(d.myfile, 'something.txt')
def test_changed(self):
"""
Test that FileField.save_form_data, if passed a truthy value, updates
its instance attribute.
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, 'else.txt')
self.assertEqual(d.myfile, 'else.txt')
def test_delete_when_file_unset(self):
"""
Calling delete on an unset FileField should not call the file deletion
process, but fail silently (#20660).
"""
d = Document()
try:
d.myfile.delete()
except OSError:
self.fail("Deleting an unset FileField should not raise OSError.")
class BinaryFieldTests(test.TestCase):
binary_data = b'\x00\x46\xFE'
def test_set_and_retrieve(self):
data_set = (self.binary_data, six.memoryview(self.binary_data))
for bdata in data_set:
dm = DataModel(data=bdata)
dm.save()
dm = DataModel.objects.get(pk=dm.pk)
self.assertEqual(bytes(dm.data), bytes(bdata))
# Resave (=update)
dm.save()
dm = DataModel.objects.get(pk=dm.pk)
self.assertEqual(bytes(dm.data), bytes(bdata))
# Test default value
self.assertEqual(bytes(dm.short_data), b'\x08')
if connection.vendor == 'mysql' and six.PY3:
# Existing MySQL DB-API drivers fail on binary data.
test_set_and_retrieve = unittest.expectedFailure(test_set_and_retrieve)
def test_max_length(self):
dm = DataModel(short_data=self.binary_data * 4)
self.assertRaises(ValidationError, dm.full_clean)
class GenericIPAddressFieldTests(test.TestCase):
def test_genericipaddressfield_formfield_protocol(self):
"""
Test that GenericIPAddressField with a specified protocol does not
generate a formfield with no specified protocol. See #20740.
"""
model_field = models.GenericIPAddressField(protocol='IPv4')
form_field = model_field.formfield()
self.assertRaises(ValidationError, form_field.clean, '::1')
model_field = models.GenericIPAddressField(protocol='IPv6')
form_field = model_field.formfield()
self.assertRaises(ValidationError, form_field.clean, '127.0.0.1')
class PromiseTest(test.TestCase):
def test_AutoField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
AutoField(primary_key=True).get_prep_value(lazy_func()),
int)
@unittest.skipIf(six.PY3, "Python 3 has no `long` type.")
def test_BigIntegerField(self):
lazy_func = lazy(lambda: long(9999999999999999999), long)
self.assertIsInstance(
BigIntegerField().get_prep_value(lazy_func()),
long)
def test_BinaryField(self):
lazy_func = lazy(lambda: b'', bytes)
self.assertIsInstance(
BinaryField().get_prep_value(lazy_func()),
bytes)
def test_BooleanField(self):
lazy_func = lazy(lambda: True, bool)
self.assertIsInstance(
BooleanField().get_prep_value(lazy_func()),
bool)
def test_CharField(self):
lazy_func = lazy(lambda: '', six.text_type)
self.assertIsInstance(
CharField().get_prep_value(lazy_func()),
six.text_type)
def test_CommaSeparatedIntegerField(self):
lazy_func = lazy(lambda: '1,2', six.text_type)
self.assertIsInstance(
CommaSeparatedIntegerField().get_prep_value(lazy_func()),
six.text_type)
def test_DateField(self):
lazy_func = lazy(lambda: datetime.date.today(), datetime.date)
self.assertIsInstance(
DateField().get_prep_value(lazy_func()),
datetime.date)
def test_DateTimeField(self):
lazy_func = lazy(lambda: datetime.datetime.now(), datetime.datetime)
self.assertIsInstance(
DateTimeField().get_prep_value(lazy_func()),
datetime.datetime)
def test_DecimalField(self):
lazy_func = lazy(lambda: Decimal('1.2'), Decimal)
self.assertIsInstance(
DecimalField().get_prep_value(lazy_func()),
Decimal)
def test_EmailField(self):
lazy_func = lazy(lambda: 'mailbox@domain.com', six.text_type)
self.assertIsInstance(
EmailField().get_prep_value(lazy_func()),
six.text_type)
def test_FileField(self):
lazy_func = lazy(lambda: 'filename.ext', six.text_type)
self.assertIsInstance(
FileField().get_prep_value(lazy_func()),
six.text_type)
def test_FilePathField(self):
lazy_func = lazy(lambda: 'tests.py', six.text_type)
self.assertIsInstance(
FilePathField().get_prep_value(lazy_func()),
six.text_type)
def test_FloatField(self):
lazy_func = lazy(lambda: 1.2, float)
self.assertIsInstance(
FloatField().get_prep_value(lazy_func()),
float)
def test_ImageField(self):
lazy_func = lazy(lambda: 'filename.ext', six.text_type)
self.assertIsInstance(
ImageField().get_prep_value(lazy_func()),
six.text_type)
def test_IntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
IntegerField().get_prep_value(lazy_func()),
int)
def test_IPAddressField(self):
lazy_func = lazy(lambda: '127.0.0.1', six.text_type)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertIsInstance(
IPAddressField().get_prep_value(lazy_func()),
six.text_type)
def test_GenericIPAddressField(self):
lazy_func = lazy(lambda: '127.0.0.1', six.text_type)
self.assertIsInstance(
GenericIPAddressField().get_prep_value(lazy_func()),
six.text_type)
def test_NullBooleanField(self):
lazy_func = lazy(lambda: True, bool)
self.assertIsInstance(
NullBooleanField().get_prep_value(lazy_func()),
bool)
def test_PositiveIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
PositiveIntegerField().get_prep_value(lazy_func()),
int)
def test_PositiveSmallIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
PositiveSmallIntegerField().get_prep_value(lazy_func()),
int)
def test_SlugField(self):
lazy_func = lazy(lambda: 'slug', six.text_type)
self.assertIsInstance(
SlugField().get_prep_value(lazy_func()),
six.text_type)
def test_SmallIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
SmallIntegerField().get_prep_value(lazy_func()),
int)
def test_TextField(self):
lazy_func = lazy(lambda: 'Abc', six.text_type)
self.assertIsInstance(
TextField().get_prep_value(lazy_func()),
six.text_type)
def test_TimeField(self):
lazy_func = lazy(lambda: datetime.datetime.now().time(), datetime.time)
self.assertIsInstance(
TimeField().get_prep_value(lazy_func()),
datetime.time)
def test_URLField(self):
lazy_func = lazy(lambda: 'http://domain.com', six.text_type)
self.assertIsInstance(
URLField().get_prep_value(lazy_func()),
six.text_type)
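# --- Illustrative sketch (not part of the original test suite) ---
# The PromiseTest cases above all rely on the same idea: lazy(func, resultclass)()
# returns a lazily evaluated Promise proxy rather than a concrete value, and each
# field's get_prep_value() is expected to force evaluation so the database layer
# receives the concrete type. A stripped-down version of one such check
# (hypothetical helper name, mirroring test_IntegerField above):

def _promise_prep_value_demo():
    lazy_value = lazy(lambda: 1, int)()          # a Promise proxy, not yet a plain int
    prepared = IntegerField().get_prep_value(lazy_value)
    assert isinstance(prepared, int)
    return prepared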
class CustomFieldTests(unittest.TestCase):
def test_14786(self):
"""
Regression test for #14786 -- Test that field values are not prepared
twice in get_db_prep_lookup().
"""
class NoopField(models.TextField):
def __init__(self, *args, **kwargs):
self.prep_value_count = 0
super(NoopField, self).__init__(*args, **kwargs)
def get_prep_value(self, value):
self.prep_value_count += 1
return super(NoopField, self).get_prep_value(value)
field = NoopField()
field.get_db_prep_lookup(
'exact', 'TEST', connection=connection, prepared=False
)
self.assertEqual(field.prep_value_count, 1)
|
|
""" Analize test results for finding bottlenecks """
import re
import sys
import csv
import time
import bisect
import os.path
import argparse
import collections
import yaml
import texttable
try:
import pygraphviz as pgv
except ImportError:
pgv = None
sys.path.append("/mnt/other/work/disk_perf_test_tool")
from wally.run_test import load_data_from
from wally.utils import b2ssize, b2ssize_10
class SensorInfo(object):
def __init__(self, name, print_name, native_ext, to_bytes_coef):
self.name = name
self.print_name = print_name
self.native_ext = native_ext
self.to_bytes_coef = to_bytes_coef
_SINFO = [
SensorInfo('recv_bytes', 'net_recv', 'B', 1),
SensorInfo('send_bytes', 'net_send', 'B', 1),
SensorInfo('sectors_written', 'hdd_write', 'Sect', 512),
SensorInfo('sectors_read', 'hdd_read', 'Sect', 512),
SensorInfo('reads_completed', 'read_op', 'OP', None),
SensorInfo('writes_completed', 'write_op', 'OP', None),
SensorInfo('procs_blocked', 'blocked_procs', 'P', None),
]
SINFO_MAP = dict((sinfo.name, sinfo) for sinfo in _SINFO)
to_bytes = dict((sinfo.name, sinfo.to_bytes_coef)
for sinfo in _SINFO
if sinfo.to_bytes_coef is not None)
class NodeSensorsData(object):
def __init__(self, source_id, hostname, headers, values):
self.source_id = source_id
self.hostname = hostname
self.headers = headers
self.values = values
self.times = None
def finalize(self):
self.times = [v[0] for v in self.values]
def get_data_for_interval(self, beg, end):
p1 = bisect.bisect_left(self.times, beg)
p2 = bisect.bisect_right(self.times, end)
obj = self.__class__(self.source_id,
self.hostname,
self.headers,
self.values[p1:p2])
obj.times = self.times[p1:p2]
return obj
def __getitem__(self, name):
idx = self.headers.index(name.split('.'))
# +1 as first is a time
return [val[idx] for val in self.values]
def load_results_csv(fd):
data = fd.read()
results = {}
for block in data.split("NEW_DATA"):
block = block.strip()
if len(block) == 0:
continue
it = csv.reader(block.split("\n"))
headers = next(it)
sens_data = [map(float, vals) for vals in it]
source_id, hostname = headers[:2]
headers = [(None, 'time')] + \
[header.split('.') for header in headers[2:]]
assert set(map(len, headers)) == set([2])
results[source_id] = NodeSensorsData(source_id, hostname,
headers, sens_data)
return results
def load_test_timings(fname, max_diff=1000):
raw_map = collections.defaultdict(lambda: [])
class data(object):
pass
load_data_from(fname)(None, data)
for test_type, test_results in data.results.items():
if test_type == 'io':
for tests_res in test_results:
raw_map[tests_res.config.name].append(tests_res.run_interval)
result = {}
for name, intervals in raw_map.items():
intervals.sort()
curr_start, curr_stop = intervals[0]
curr_result = []
for (start, stop) in intervals[1:]:
if abs(curr_start - start) < max_diff:
# if abs(curr_stop - stop) > 2:
# print abs(curr_stop - stop)
assert abs(curr_stop - stop) < max_diff
else:
assert start + max_diff >= curr_stop
assert stop > curr_stop
curr_result.append((curr_start, curr_stop))
curr_start, curr_stop = start, stop
curr_result.append((curr_start, curr_stop))
merged_res = []
curr_start, curr_stop = curr_result[0]
for start, stop in curr_result[1:]:
if abs(curr_stop - start) < max_diff:
curr_stop = stop
else:
merged_res.append((curr_start, curr_stop))
curr_start, curr_stop = start, stop
merged_res.append((curr_start, curr_stop))
result[name] = merged_res
return result
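# --- Illustrative sketch (not part of the original module) ---
# load_test_timings() above works in two passes: it first groups run intervals whose
# starts are within max_diff of each other, then merges adjacent intervals whose gap
# is smaller than max_diff. The merging pass boils down to this:

def _merge_close_intervals(intervals, max_diff):
    """Merge sorted (start, stop) pairs whose gap is smaller than max_diff."""
    merged = []
    curr_start, curr_stop = intervals[0]
    for start, stop in intervals[1:]:
        if abs(curr_stop - start) < max_diff:
            curr_stop = stop                     # close enough: extend the current interval
        else:
            merged.append((curr_start, curr_stop))
            curr_start, curr_stop = start, stop
    merged.append((curr_start, curr_stop))
    return merged

# e.g. _merge_close_intervals([(0, 10), (11, 20), (40, 50)], max_diff=5)
#      -> [(0, 20), (40, 50)]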
critical_values = dict(
io_queue=1,
usage_percent=0.8,
procs_blocked=1,
procs_queue=1)
class AggregatedData(object):
def __init__(self, sensor_name):
self.sensor_name = sensor_name
# (node, device): count
self.per_device = collections.defaultdict(lambda: 0)
# node: count
self.per_node = collections.defaultdict(lambda: 0)
# role: count
self.per_role = collections.defaultdict(lambda: 0)
# (role_or_node, device_or_*): count
self.all_together = collections.defaultdict(lambda: 0)
def __str__(self):
res = "<AggregatedData({0})>\n".format(self.sensor_name)
for (role_or_node, device), val in self.all_together.items():
res += " {0}:{1} = {2}\n".format(role_or_node, device, val)
return res
def total_consumption(sensors_data, roles_map):
result = {}
for name, sensor_data in sensors_data.items():
for pos, (dev, sensor) in enumerate(sensor_data.headers):
if 'time' == sensor:
continue
try:
ad = result[sensor]
except KeyError:
ad = result[sensor] = AggregatedData(sensor)
val = sum(vals[pos] for vals in sensor_data.values)
ad.per_device[(sensor_data.hostname, dev)] += val
# vals1 = sensors_data['localhost:22']['sdc.sectors_read']
# vals2 = sensors_data['localhost:22']['sdb.sectors_written']
# from matplotlib import pyplot as plt
# plt.plot(range(len(vals1)), vals1)
# plt.plot(range(len(vals2)), vals2)
# plt.show()
# exit(1)
for ad in result.values():
for (hostname, dev), val in ad.per_device.items():
ad.per_node[hostname] += val
for role in roles_map[hostname]:
ad.per_role[role] += val
ad.all_together[(hostname, dev)] = val
for role, val in ad.per_role.items():
ad.all_together[(role, '*')] = val
for node, val in ad.per_node.items():
ad.all_together[(node, '*')] = val
return result
def avg_load(sensors_data):
load = collections.defaultdict(lambda: 0)
min_time = 0xFFFFFFFFFFF
max_time = 0
for sensor_data in sensors_data.values():
min_time = min(min_time, min(sensor_data.times))
max_time = max(max_time, max(sensor_data.times))
for name, max_val in critical_values.items():
for pos, (dev, sensor) in enumerate(sensor_data.headers):
if sensor == name:
for vals in sensor_data.values:
if vals[pos] > max_val:
load[(sensor_data.hostname, dev, sensor)] += 1
return load, max_time - min_time
def print_bottlenecks(sensors_data, max_bottlenecks=15):
load, duration = avg_load(sensors_data)
if not load:
return "\n*** No bottlenecks found *** \n"
rev_items = ((v, k) for (k, v) in load.items())
res = sorted(rev_items, reverse=True)[:max_bottlenecks]
max_name_sz = max(len(name) for _, name in res)
frmt = "{{0:>{0}}} | {{1:>4}}".format(max_name_sz)
table = [frmt.format("Component", "% times load > 100%")]
for (v, k) in res:
table.append(frmt.format(k, int(v * 100.0 / duration + 0.5)))
return "\n".join(table)
def print_consumption(agg, min_transfer=None):
rev_items = []
for (node_or_role, dev), v in agg.all_together.items():
rev_items.append((int(v), node_or_role + ':' + dev))
res = sorted(rev_items, reverse=True)
if min_transfer is not None:
res = [(v, k)
for (v, k) in res
if v >= min_transfer]
if len(res) == 0:
return None
res = [(b2ssize(v) + "B", k) for (v, k) in res]
max_name_sz = max(len(name) for _, name in res)
max_val_sz = max(len(val) for val, _ in res)
frmt = " {{0:>{0}}} | {{1:>{1}}} ".format(max_name_sz, max_val_sz)
table = [frmt.format("Component", "Usage")]
for (v, k) in res:
table.append(frmt.format(k, v))
return "\n".join(table)
def make_roles_mapping(source_id_mapping, source_id2hostname):
result = {}
for ssh_url, roles in source_id_mapping.items():
if '@' in ssh_url:
source_id = ssh_url.split('@')[1]
else:
source_id = ssh_url.split('://')[1]
if source_id.count(':') == 2:
source_id = source_id.rsplit(":", 1)[0]
if source_id.endswith(':'):
source_id += "22"
if source_id in source_id2hostname:
result[source_id] = roles
result[source_id2hostname[source_id]] = roles
for testnode_src in (set(source_id2hostname) - set(result)):
result[testnode_src] = ['testnode']
result[source_id2hostname[testnode_src]] = ['testnode']
return result
def get_testdata_size(consumption):
max_data = 0
for name, sens in SINFO_MAP.items():
if sens.to_bytes_coef is not None:
agg = consumption.get(name)
if agg is not None:
cdt = agg.per_role.get('testnode', 0) * sens.to_bytes_coef
max_data = max(max_data, cdt)
return max_data
def get_testop_cout(consumption):
max_op = 0
for name, sens in SINFO_MAP.items():
if sens.to_bytes_coef is None:
agg = consumption.get(name)
if agg is not None:
max_op = max(max_op, agg.per_role.get('testnode', 0))
return max_op
def get_data_for_intervals(data, intervals):
res = {}
for begin, end in intervals:
for name, node_data in data.items():
ndata = node_data.get_data_for_interval(begin, end)
res[name] = ndata
return res
class Host(object):
def __init__(self, name=None):
self.name = name
self.hdd_devs = {}
self.net_devs = None
def plot_consumption(per_consumer_table, fields, refload):
if pgv is None:
return
hosts = {}
storage_sensors = ('sectors_written', 'sectors_read')
for (hostname, dev), consumption in per_consumer_table.items():
if hostname not in hosts:
hosts[hostname] = Host(hostname)
host = hosts[hostname]
cons_map = dict(zip(fields, consumption))
for sn in storage_sensors:
vl = cons_map.get(sn, 0)
if vl > 0:
host.hdd_devs.setdefault(dev, {})[sn] = vl
p = pgv.AGraph(name='system', directed=True)
net = "Network"
p.add_node(net)
in_color = 'red'
out_color = 'green'
for host in hosts.values():
g = p.subgraph(name="cluster_" + host.name, label=host.name,
color="blue")
g.add_node(host.name, shape="diamond")
p.add_edge(host.name, net)
p.add_edge(net, host.name)
for dev_name, values in host.hdd_devs.items():
if dev_name == '*':
continue
to = values.get('sectors_written', 0)
frm = values.get('sectors_read', 0)
to_pw = 7 * to / refload
frm_pw = 7 * frm / refload
min_with = 0.1
if to_pw > min_with or frm_pw > min_with:
dev_fqn = host.name + "." + dev_name
g.add_node(dev_fqn)
if to_pw > min_with:
g.add_edge(host.name, dev_fqn,
label=b2ssize(to) + "B",
penwidth=to_pw,
fontcolor=out_color,
color=out_color)
if frm_pw > min_with:
g.add_edge(dev_fqn, host.name,
label=b2ssize(frm) + "B",
penwidth=frm_pw,
color=in_color,
fontcolor=in_color)
return p.string()
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--time_period', nargs=2,
type=int, default=None,
help="Begin and end time for tests")
parser.add_argument('-m', '--max-bottleneck', type=int,
default=15, help="Maximum number of bottlenecks to show")
parser.add_argument('-x', '--max-diff', type=int,
default=10, help="Hide values below this threshold, " +
"in tenths of a percent of the test nodes' total load")
parser.add_argument('-d', '--debug-ver', action='store_true',
help="Full report with original data")
parser.add_argument('-u', '--user-ver', action='store_true',
default=True, help="Avg load report")
parser.add_argument('-s', '--select-loads', nargs='*', default=[])
parser.add_argument('-f', '--fields', nargs='*', default=[])
parser.add_argument('results_folder')
return parser.parse_args(args[1:])
def main(argv):
opts = parse_args(argv)
stor_dir = os.path.join(opts.results_folder, 'sensor_storage')
data = {}
source_id2hostname = {}
csv_files = os.listdir(stor_dir)
for fname in csv_files:
assert re.match(r"\d+_\d+.csv$", fname)
csv_files.sort(key=lambda x: int(x.split('_')[0]))
for fname in csv_files:
with open(os.path.join(stor_dir, fname)) as fd:
for name, node_sens_data in load_results_csv(fd).items():
if name in data:
assert data[name].hostname == node_sens_data.hostname
assert data[name].source_id == node_sens_data.source_id
assert data[name].headers == node_sens_data.headers
data[name].values.extend(node_sens_data.values)
else:
data[name] = node_sens_data
for nd in data.values():
assert nd.source_id not in source_id2hostname
source_id2hostname[nd.source_id] = nd.hostname
nd.finalize()
roles_file = os.path.join(opts.results_folder,
'nodes.yaml')
src2roles = yaml.load(open(roles_file))
timings = load_test_timings(opts.results_folder)
roles_map = make_roles_mapping(src2roles, source_id2hostname)
max_diff = float(opts.max_diff) / 1000
fields = ('recv_bytes', 'send_bytes',
'sectors_read', 'sectors_written',
'reads_completed', 'writes_completed')
if opts.fields != []:
fields = [field for field in fields if field in opts.fields]
for test_name, intervals in sorted(timings.items()):
if opts.select_loads != []:
if test_name not in opts.select_loads:
continue
data_chunks = get_data_for_intervals(data, intervals)
consumption = total_consumption(data_chunks, roles_map)
bottlenecks = print_bottlenecks(data_chunks)
testdata_sz = get_testdata_size(consumption) * max_diff
testop_count = get_testop_cout(consumption) * max_diff
per_consumer_table = {}
per_consumer_table_str = {}
all_consumers = set()
for value in consumption.values():
all_consumers = all_consumers | set(value.all_together)
fields = [field for field in fields if field in consumption]
all_consumers_sum = []
for consumer in all_consumers:
tb_str = per_consumer_table_str[consumer] = []
tb = per_consumer_table[consumer] = []
vl = 0
for name in fields:
val = consumption[name].all_together[consumer]
if SINFO_MAP[name].to_bytes_coef is None:
if val < testop_count:
tb_str.append('0')
else:
tb_str.append(b2ssize_10(int(val)))
else:
val = int(val) * SINFO_MAP[name].to_bytes_coef
if val < testdata_sz:
tb_str.append('-')
else:
tb_str.append(b2ssize(val) + "B")
tb.append(int(val))
vl += int(val)
all_consumers_sum.append((vl, consumer))
all_consumers_sum.sort(reverse=True)
plot_consumption(per_consumer_table, fields,
testdata_sz / max_diff)
tt = texttable.Texttable(max_width=130)
tt.set_cols_align(["l"] + ["r"] * len(fields))
header = ["Name"]
for fld in fields:
if fld in SINFO_MAP:
header.append(SINFO_MAP[fld].print_name)
else:
header.append(fld)
tt.header(header)
for summ, consumer in all_consumers_sum:
if summ > 0:
tt.add_row([":".join(consumer)] +
per_consumer_table_str[consumer])
tt.set_deco(texttable.Texttable.VLINES | texttable.Texttable.HEADER)
res = tt.draw()
max_len = max(map(len, res.split("\n")))
print(test_name.center(max_len))
print(res)
print(bottlenecks)
if __name__ == "__main__":
exit(main(sys.argv))
|
|
# orm/interfaces.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines some key base classes prominent within the internals,
as well as the now-deprecated ORM extension classes.
Other than the deprecated extensions, this module and the
classes within are mostly private, though some attributes
are exposed when inspecting mappings.
"""
from __future__ import absolute_import
from .. import util
from ..sql import operators
from .base import (ONETOMANY, MANYTOONE, MANYTOMANY,
EXT_CONTINUE, EXT_STOP, NOT_EXTENSION)
from .base import (InspectionAttr, InspectionAttrInfo,
_MappedAttribute)
import collections
from .. import inspect
# imported later
MapperExtension = SessionExtension = AttributeExtension = None
__all__ = (
'AttributeExtension',
'EXT_CONTINUE',
'EXT_STOP',
'ONETOMANY',
'MANYTOMANY',
'MANYTOONE',
'NOT_EXTENSION',
'LoaderStrategy',
'MapperExtension',
'MapperOption',
'MapperProperty',
'PropComparator',
'SessionExtension',
'StrategizedProperty',
)
class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots):
"""Represent a particular class attribute mapped by :class:`.Mapper`.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
__slots__ = (
'_configure_started', '_configure_finished', 'parent', 'key',
'info'
)
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
The collection typically only applies to a RelationshipProperty.
"""
is_property = True
"""Part of the InspectionAttr interface; states this object is a
mapper property.
"""
def _memoized_attr_info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`.relationship`, or :func:`.composite`
functions.
.. versionadded:: 0.8 Added support for .info to all
:class:`.MapperProperty` subclasses.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
def setup(self, context, entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
def create_row_processor(self, context, path,
mapper, result, adapter, populators):
"""Produce row processing functions and append to the given
set of populators lists.
"""
def cascade_iterator(self, type_, state, visited_instances=None,
halt_on=None):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
This method typically only applies to RelationshipProperty.
"""
return iter(())
def set_parent(self, parent, init):
"""Set the parent mapper that references this MapperProperty.
This method is overridden by some subclasses to perform extra
setup when the mapper is first known.
"""
self.parent = parent
def instrument_class(self, mapper):
"""Hook called by the Mapper to the property to initiate
instrumentation of the class attribute managed by this
MapperProperty.
The MapperProperty here will typically call out to the
attributes module to set up an InstrumentedAttribute.
This step is the first of two steps to set up an InstrumentedAttribute,
and is called early in the mapper setup process.
The second step is typically the init_class_attribute step,
called from StrategizedProperty via the post_instrument_class()
hook. This step assigns additional state to the InstrumentedAttribute
(specifically the "impl") which has been determined after the
MapperProperty has determined what kind of persistence
management it needs to do (e.g. scalar, object, collection, etc).
"""
def __init__(self):
self._configure_started = False
self._configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
:class:`.MapperProperty`.
This is basically a ``getattr()`` call::
return getattr(self.parent.class_, self.key)
I.e. if this :class:`.MapperProperty` were named ``addresses``,
and the class to which it is mapped is ``User``, this sequence
is possible::
>>> from sqlalchemy import inspect
>>> mapper = inspect(User)
>>> addresses_property = mapper.attrs.addresses
>>> addresses_property.class_attribute is User.addresses
True
>>> User.addresses.property is addresses_property
True
"""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
The given Mapper is the Mapper invoking the operation, which
may not be the same Mapper as self.parent in an inheritance
scenario; however, Mapper will always at least be a sub-mapper of
self.parent.
This method is typically used by StrategizedProperty, which delegates
it to LoaderStrategy.init_class_attribute() to perform final setup
on the class-bound InstrumentedAttribute.
"""
def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive, _resolve_conflict_map):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object.
"""
def __repr__(self):
return '<%s at 0x%x; %s>' % (
self.__class__.__name__,
id(self), getattr(self, 'key', 'no key'))
class PropComparator(operators.ColumnOperators):
"""Defines SQL operators for :class:`.MapperProperty` objects.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy
0.7, as well as Core-level operator redefinition in
SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
instances is extremely rare. See :ref:`hybrids_toplevel` as well
as :ref:`types_operators`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \\
ColumnProperty,\\
CompositeProperty,\\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
See also:
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = 'prop', 'property', '_parententity', '_adapt_to_entity'
def __init__(self, prop, parentmapper, adapt_to_entity=None):
self.prop = self.property = prop
self._parententity = adapt_to_entity or parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def _query_clause_element(self):
return self.__clause_element__()
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parententity, adapt_to_entity)
@property
def _parentmapper(self):
"""legacy; this is renamed to _parententity to be
compatible with QueryableAttribute."""
return inspect(self._parententity).mapper
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
"""Redefine this object in terms of a polymorphic subclass.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
"""
return self.operate(PropComparator.of_type_op, class_)
def any(self, criterion=None, **kwargs):
"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
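# Illustrative usage sketch for the any()/has() comparator operations above,
# assuming mapped User/Address classes with the usual User.addresses and
# Address.user relationships (hypothetical names, not defined in this module):
#
#     session.query(User).filter(
#         User.addresses.any(Address.email_address == 'foo@example.com'))
#
#     session.query(Address).filter(
#         Address.user.has(User.name == 'ed'))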
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
The mechanics of StrategizedProperty are used for every Query
invocation for every mapped attribute participating in that Query,
to determine first how the attribute will be rendered in SQL
and secondly how the attribute will retrieve a value from a result
row and apply it to a mapped object. The routines here are very
performance-critical.
"""
__slots__ = '_strategies', 'strategy'
strategy_wildcard_key = None
def _get_context_loader(self, context, path):
load = None
# use EntityRegistry.__getitem__()->PropRegistry here so
# that the path is stated in terms of our base
search_path = dict.__getitem__(path, self)
# search among: exact match, "attr.*", "default" strategy
# if any.
for path_key in (
search_path._loader_key,
search_path._wildcard_path_loader_key,
search_path._default_path_loader_key
):
if path_key in context.attributes:
load = context.attributes[path_key]
break
return load
def _get_strategy(self, key):
try:
return self._strategies[key]
except KeyError:
cls = self._strategy_lookup(*key)
self._strategies[key] = self._strategies[
cls] = strategy = cls(self, key)
return strategy
def setup(
self, context, entity, path, adapter, **kwargs):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.setup_query(context, entity, path, loader, adapter, **kwargs)
def create_row_processor(
self, context, path, mapper,
result, adapter, populators):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.create_row_processor(
context, path, loader,
mapper, result, adapter, populators)
def do_init(self):
self._strategies = {}
self.strategy = self._get_strategy(self.strategy_key)
def post_instrument_class(self, mapper):
if not self.parent.non_primary and \
not mapper.class_manager._attr_has_impl(self.key):
self.strategy.init_class_attribute(mapper)
_all_strategies = collections.defaultdict(dict)
@classmethod
def strategy_for(cls, **kw):
def decorate(dec_cls):
# ensure each subclass of the strategy has its
# own _strategy_keys collection
if '_strategy_keys' not in dec_cls.__dict__:
dec_cls._strategy_keys = []
key = tuple(sorted(kw.items()))
cls._all_strategies[cls][key] = dec_cls
dec_cls._strategy_keys.append(key)
return dec_cls
return decorate
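# Illustrative sketch (hypothetical names): concrete loader strategies register
# themselves against a StrategizedProperty subclass via strategy_for(), keyed
# by the keyword arguments passed to the decorator:
#
#     @RelationshipProperty.strategy_for(lazy="hypothetical")
#     class HypotheticalLoader(LoaderStrategy):
#         def create_row_processor(self, context, path, loadopt, mapper,
#                                  result, adapter, populators):
#             pass  # populate the attribute from the incoming row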
@classmethod
def _strategy_lookup(cls, *key):
for prop_cls in cls.__mro__:
if prop_cls in cls._all_strategies:
strategies = cls._all_strategies[prop_cls]
try:
return strategies[key]
except KeyError:
pass
raise Exception("can't locate strategy for %s %s" % (cls, key))
class MapperOption(object):
"""Describe a modification to a Query."""
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def process_query(self, query):
"""Apply a modification to the given :class:`.Query`."""
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
This is typically used during a lazy load or scalar refresh
operation to propagate options stated in the original Query to the
new Query being used for the load. It occurs for those options that
specify propagate_to_loaders=True.
"""
self.process_query(query)
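# Minimal sketch of a MapperOption subclass (hypothetical example), assuming
# the private query._attributes dictionary used by loader options to carry
# per-query state:
#
#     class HypotheticalFlagOption(MapperOption):
#         propagate_to_loaders = True
#
#         def process_query(self, query):
#             # stash a flag that loader strategies could inspect later
#             query._attributes[('hypothetical', 'flag')] = True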
class LoaderStrategy(object):
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. this
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
For example, simple column attributes will add their represented
column to the list of selected columns, a joined eager loader
may establish join clauses to add to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
__slots__ = 'parent_property', 'is_class_level', 'parent', 'key', \
'strategy_key', 'strategy_opts'
def __init__(self, parent, strategy_key):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
self.strategy_key = strategy_key
self.strategy_opts = dict(strategy_key)
def init_class_attribute(self, mapper):
pass
def setup_query(self, context, entity, path, loadopt, adapter, **kwargs):
"""Establish column and other state for a given QueryContext.
This method fulfills the contract specified by MapperProperty.setup().
StrategizedProperty delegates its setup() method
directly to this method.
"""
def create_row_processor(self, context, path, loadopt, mapper,
result, adapter, populators):
"""Establish row processing functions for a given QueryContext.
This method fulfills the contract specified by
MapperProperty.create_row_processor().
StrategizedProperty delegates its create_row_processor() method
directly to this method.
"""
def __str__(self):
return str(self.parent_property)
|
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import shlex
import warnings
from datetime import datetime
import six
from . import clientbase
from . import constants
from . import errors
from .auth import auth
from .utils import utils, check_resource
from .constants import INSECURE_REGISTRY_DEPRECATION_WARNING
log = logging.getLogger(__name__)
class Client(clientbase.ClientBase):
@check_resource
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False):
params = {
'logs': logs and 1 or 0,
'stdout': stdout and 1 or 0,
'stderr': stderr and 1 or 0,
'stream': stream and 1 or 0,
}
u = self._url("/containers/{0}/attach", container)
response = self._post(u, params=params, stream=stream)
return self._get_result(container, stream, response)
@check_resource
def attach_socket(self, container, params=None, ws=False):
if params is None:
params = {
'stdout': 1,
'stderr': 1,
'stream': 1
}
if ws:
return self._attach_websocket(container, params)
u = self._url("/containers/{0}/attach", container)
return self._get_raw_response_socket(self.post(
u, None, params=self._attach_params(params), stream=True))
def build(self, path=None, tag=None, quiet=False, fileobj=None,
nocache=False, rm=False, stream=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
decode=False):
remote = context = headers = None
container_limits = container_limits or {}
if path is None and fileobj is None:
raise TypeError("Either path or fileobj needs to be provided.")
for key in container_limits.keys():
if key not in constants.CONTAINER_LIMITS_KEYS:
raise errors.DockerException(
'Invalid container_limits key {0}'.format(key)
)
if custom_context:
if not fileobj:
raise TypeError("You must specify fileobj with custom_context")
context = fileobj
elif fileobj is not None:
context = utils.mkbuildcontext(fileobj)
elif path.startswith(('http://', 'https://',
'git://', 'github.com/', 'git@')):
remote = path
elif not os.path.isdir(path):
raise TypeError("You must specify a directory to build in path")
else:
dockerignore = os.path.join(path, '.dockerignore')
exclude = None
if os.path.exists(dockerignore):
with open(dockerignore, 'r') as f:
exclude = list(filter(bool, f.read().splitlines()))
context = utils.tar(path, exclude=exclude, dockerfile=dockerfile)
if utils.compare_version('1.8', self._version) >= 0:
stream = True
if dockerfile and utils.compare_version('1.17', self._version) < 0:
raise errors.InvalidVersion(
'dockerfile was only introduced in API version 1.17'
)
if utils.compare_version('1.19', self._version) < 0:
pull = 1 if pull else 0
u = self._url('/build')
params = {
't': tag,
'remote': remote,
'q': quiet,
'nocache': nocache,
'rm': rm,
'forcerm': forcerm,
'pull': pull,
'dockerfile': dockerfile,
}
params.update(container_limits)
if context is not None:
headers = {'Content-Type': 'application/tar'}
if encoding:
headers['Content-Encoding'] = encoding
if utils.compare_version('1.9', self._version) >= 0:
log.debug('Looking for auth config')
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if not self._auth_configs:
log.debug("No auth config in memory - loading from filesystem")
self._auth_configs = auth.load_config()
# Send the full auth configuration (if any exists), since the build
# could use any (or all) of the registries.
if self._auth_configs:
log.debug(
'Sending auth config ({0})'.format(
', '.join(repr(k) for k in self._auth_configs.keys())
)
)
if headers is None:
headers = {}
if utils.compare_version('1.19', self._version) >= 0:
headers['X-Registry-Config'] = auth.encode_header(
self._auth_configs
)
else:
headers['X-Registry-Config'] = auth.encode_header({
'configs': self._auth_configs
})
else:
log.debug('No auth config found')
response = self._post(
u,
data=context,
params=params,
headers=headers,
stream=stream,
timeout=timeout,
)
if context is not None and not custom_context:
context.close()
if stream:
return self._stream_helper(response, decode=decode)
else:
output = self._result(response)
srch = r'Successfully built ([0-9a-f]+)'
match = re.search(srch, output)
if not match:
return None, output
return match.group(1), output
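# Minimal usage sketch for build() (assumes a reachable Docker daemon and a
# Dockerfile in the current directory); with stream=True and decode=True each
# yielded item is a parsed JSON progress message:
#
#     cli = Client(base_url='unix://var/run/docker.sock')
#     for line in cli.build(path='.', tag='myimage:latest', rm=True,
#                           stream=True, decode=True):
#         print(line)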
@check_resource
def commit(self, container, repository=None, tag=None, message=None,
author=None, conf=None):
params = {
'container': container,
'repo': repository,
'tag': tag,
'comment': message,
'author': author
}
u = self._url("/commit")
return self._result(self._post_json(u, data=conf, params=params),
json=True)
def containers(self, quiet=False, all=False, trunc=False, latest=False,
since=None, before=None, limit=-1, size=False,
filters=None):
params = {
'limit': 1 if latest else limit,
'all': 1 if all else 0,
'size': 1 if size else 0,
'trunc_cmd': 1 if trunc else 0,
'since': since,
'before': before
}
if filters:
params['filters'] = utils.convert_filters(filters)
u = self._url("/containers/json")
res = self._result(self._get(u, params=params), True)
if quiet:
return [{'Id': x['Id']} for x in res]
if trunc:
for x in res:
x['Id'] = x['Id'][:12]
return res
@check_resource
def copy(self, container, resource):
res = self._post_json(
self._url("/containers/{0}/copy".format(container)),
data={"Resource": resource},
stream=True
)
self._raise_for_status(res)
return res.raw
def create_container(self, image, command=None, hostname=None, user=None,
detach=False, stdin_open=False, tty=False,
mem_limit=None, ports=None, environment=None,
dns=None, volumes=None, volumes_from=None,
network_disabled=False, name=None, entrypoint=None,
cpu_shares=None, working_dir=None, domainname=None,
memswap_limit=None, cpuset=None, host_config=None,
mac_address=None, labels=None, volume_driver=None):
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
if host_config and utils.compare_version('1.15', self._version) < 0:
raise errors.InvalidVersion(
'host_config is not supported in API < 1.15'
)
config = self.create_container_config(
image, command, hostname, user, detach, stdin_open,
tty, mem_limit, ports, environment, dns, volumes, volumes_from,
network_disabled, entrypoint, cpu_shares, working_dir, domainname,
memswap_limit, cpuset, host_config, mac_address, labels,
volume_driver
)
return self.create_container_from_config(config, name)
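# Minimal usage sketch (hypothetical image/command) pairing create_container()
# with start() and wait() defined further below; host-level settings go through
# create_host_config() rather than start() on newer API versions:
#
#     container = cli.create_container(
#         image='busybox', command='echo hello',
#         host_config=cli.create_host_config(binds=['/tmp:/data']))
#     cli.start(container)
#     print(cli.wait(container))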
def create_container_config(self, *args, **kwargs):
return utils.create_container_config(self._version, *args, **kwargs)
def create_container_from_config(self, config, name=None):
u = self._url("/containers/create")
params = {
'name': name
}
res = self._post_json(u, data=config, params=params)
return self._result(res, True)
def create_host_config(self, *args, **kwargs):
if not kwargs:
kwargs = {}
if 'version' in kwargs:
raise TypeError(
"create_host_config() got an unexpected "
"keyword argument 'version'"
)
kwargs['version'] = self._version
return utils.create_host_config(*args, **kwargs)
@check_resource
def diff(self, container):
return self._result(
self._get(self._url("/containers/{0}/changes", container)), True
)
def events(self, since=None, until=None, filters=None, decode=None):
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
if isinstance(until, datetime):
until = utils.datetime_to_timestamp(until)
if filters:
filters = utils.convert_filters(filters)
params = {
'since': since,
'until': until,
'filters': filters
}
return self._stream_helper(
self.get(self._url('/events'), params=params, stream=True),
decode=decode
)
@check_resource
def exec_create(self, container, cmd, stdout=True, stderr=True, tty=False,
privileged=False, user=''):
if utils.compare_version('1.15', self._version) < 0:
raise errors.InvalidVersion('Exec is not supported in API < 1.15')
if privileged and utils.compare_version('1.19', self._version) < 0:
raise errors.InvalidVersion(
'Privileged exec is not supported in API < 1.19'
)
if user and utils.compare_version('1.19', self._version) < 0:
raise errors.InvalidVersion(
'User-specific exec is not supported in API < 1.19'
)
if isinstance(cmd, six.string_types):
cmd = shlex.split(str(cmd))
data = {
'Container': container,
'User': user,
'Privileged': privileged,
'Tty': tty,
'AttachStdin': False,
'AttachStdout': stdout,
'AttachStderr': stderr,
'Cmd': cmd
}
url = self._url('/containers/{0}/exec', container)
res = self._post_json(url, data=data)
return self._result(res, True)
def exec_inspect(self, exec_id):
if utils.compare_version('1.16', self._version) < 0:
raise errors.InvalidVersion(
'exec_inspect is not supported in API < 1.16'
)
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
res = self._get(self._url("/exec/{0}/json", exec_id))
return self._result(res, True)
def exec_resize(self, exec_id, height=None, width=None):
if utils.compare_version('1.15', self._version) < 0:
raise errors.InvalidVersion('Exec is not supported in API < 1.15')
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
params = {'h': height, 'w': width}
url = self._url("/exec/{0}/resize", exec_id)
res = self._post(url, params=params)
self._raise_for_status(res)
def exec_start(self, exec_id, detach=False, tty=False, stream=False):
if utils.compare_version('1.15', self._version) < 0:
raise errors.InvalidVersion('Exec is not supported in API < 1.15')
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
data = {
'Tty': tty,
'Detach': detach
}
res = self._post_json(
self._url('/exec/{0}/start', exec_id), data=data, stream=stream
)
return self._get_result_tty(stream, res, tty)
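# Minimal usage sketch of the exec workflow against a running container
# (hypothetical container name); requires API version 1.15 or later:
#
#     exec_id = cli.exec_create('my_container', cmd='cat /etc/hostname')
#     output = cli.exec_start(exec_id)
#     print(output)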
@check_resource
def export(self, container):
res = self._get(
self._url("/containers/{0}/export", container), stream=True
)
self._raise_for_status(res)
return res.raw
@check_resource
def get_image(self, image):
res = self._get(self._url("/images/{0}/get", image), stream=True)
self._raise_for_status(res)
return res.raw
@check_resource
def history(self, image):
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
def images(self, name=None, quiet=False, all=False, viz=False,
filters=None):
if viz:
if utils.compare_version('1.7', self._version) >= 0:
raise Exception('Viz output is not supported in API >= 1.7!')
return self._result(self._get(self._url("images/viz")))
params = {
'filter': name,
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
True)
if quiet:
return [x['Id'] for x in res]
return res
def import_image(self, src=None, repository=None, tag=None, image=None):
if src:
if isinstance(src, six.string_types):
try:
result = self.import_image_from_file(
src, repository=repository, tag=tag)
except IOError:
result = self.import_image_from_url(
src, repository=repository, tag=tag)
else:
result = self.import_image_from_data(
src, repository=repository, tag=tag)
elif image:
result = self.import_image_from_image(
image, repository=repository, tag=tag)
else:
raise Exception("Must specify a src or image")
return result
def import_image_from_data(self, data, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': '-',
'repo': repository,
'tag': tag
}
headers = {
'Content-Type': 'application/tar',
}
return self._result(
self._post(u, data=data, params=params, headers=headers))
def import_image_from_file(self, filename, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': '-',
'repo': repository,
'tag': tag
}
headers = {
'Content-Type': 'application/tar',
}
with open(filename, 'rb') as f:
return self._result(
self._post(u, data=f, params=params, headers=headers,
timeout=None))
def import_image_from_stream(self, stream, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': '-',
'repo': repository,
'tag': tag
}
headers = {
'Content-Type': 'application/tar',
'Transfer-Encoding': 'chunked',
}
return self._result(
self._post(u, data=stream, params=params, headers=headers))
def import_image_from_url(self, url, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': url,
'repo': repository,
'tag': tag
}
return self._result(
self._post(u, data=None, params=params))
def import_image_from_image(self, image, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromImage': image,
'repo': repository,
'tag': tag
}
return self._result(
self._post(u, data=None, params=params))
def info(self):
return self._result(self._get(self._url("/info")),
True)
@check_resource
def insert(self, image, url, path):
if utils.compare_version('1.12', self._version) >= 0:
raise errors.DeprecatedMethod(
'insert is not available for API version >=1.12'
)
api_url = self._url("/images/{0}/insert", image)
params = {
'url': url,
'path': path
}
return self._result(self._post(api_url, params=params))
@check_resource
def inspect_container(self, container):
return self._result(
self._get(self._url("/containers/{0}/json", container)), True
)
@check_resource
def inspect_image(self, image):
return self._result(
self._get(self._url("/images/{0}/json", image)), True
)
@check_resource
def kill(self, container, signal=None):
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
params['signal'] = signal
res = self._post(url, params=params)
self._raise_for_status(res)
def load_image(self, data):
res = self._post(self._url("/images/load"), data=data)
self._raise_for_status(res)
def login(self, username, password=None, email=None, registry=None,
reauth=False, insecure_registry=False, dockercfg_path=None):
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
DeprecationWarning
)
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config(dockercfg_path)
elif not self._auth_configs:
self._auth_configs = auth.load_config()
registry = registry or auth.INDEX_URL
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
and not reauth:
return authcfg
req_data = {
'username': username,
'password': password,
'email': email,
'serveraddress': registry,
}
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
self._auth_configs[registry] = req_data
return self._result(response, json=True)
@check_resource
def logs(self, container, stdout=True, stderr=True, stream=False,
timestamps=False, tail='all'):
if utils.compare_version('1.11', self._version) >= 0:
params = {'stderr': stderr and 1 or 0,
'stdout': stdout and 1 or 0,
'timestamps': timestamps and 1 or 0,
'follow': stream and 1 or 0,
}
if utils.compare_version('1.13', self._version) >= 0:
if tail != 'all' and (not isinstance(tail, int) or tail <= 0):
tail = 'all'
params['tail'] = tail
url = self._url("/containers/{0}/logs", container)
res = self._get(url, params=params, stream=stream)
return self._get_result(container, stream, res)
return self.attach(
container,
stdout=stdout,
stderr=stderr,
stream=stream,
logs=True
)
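# Usage sketch: follow the logs of a running container (hypothetical name);
# with stream=True a generator of log chunks is returned instead of bytes:
#
#     for chunk in cli.logs('my_container', stream=True, tail=10):
#         print(chunk)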
@check_resource
def pause(self, container):
url = self._url('/containers/{0}/pause', container)
res = self._post(url)
self._raise_for_status(res)
def ping(self):
return self._result(self._get(self._url('/_ping')))
@check_resource
def port(self, container, private_port):
res = self._get(self._url("/containers/{0}/json", container))
self._raise_for_status(res)
json_ = res.json()
s_port = str(private_port)
h_ports = None
# Port settings is None when the container is running with
# network_mode=host.
port_settings = json_.get('NetworkSettings', {}).get('Ports')
if port_settings is None:
return None
h_ports = port_settings.get(s_port + '/udp')
if h_ports is None:
h_ports = port_settings.get(s_port + '/tcp')
return h_ports
def pull(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None):
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
DeprecationWarning
)
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
if repo_name.count(":") == 1:
repository, tag = repository.rsplit(":", 1)
params = {
'tag': tag,
'fromImage': repository
}
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if auth_config is None:
log.debug('Looking for auth config')
if not self._auth_configs:
log.debug(
"No auth config in memory - loading from filesystem")
self._auth_configs = auth.load_config()
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# Do not fail here if no authentication exists for this
# specific registry as we can have a readonly pull. Just
# put the header if we can.
if authcfg:
log.debug('Found auth config')
# auth_config needs to be a dict in the format used by
# auth.py username , password, serveraddress, email
headers['X-Registry-Auth'] = auth.encode_header(
authcfg
)
else:
log.debug('No auth config found')
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post(
self._url('/images/create'), params=params, headers=headers,
stream=stream, timeout=None
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response)
return self._result(response)
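# Usage sketch: pull an image and print the streamed progress; each streamed
# line is a JSON document that the caller may decode:
#
#     import json
#     for line in cli.pull('busybox', tag='latest', stream=True):
#         print(json.loads(line))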
def push(self, repository, tag=None, stream=False,
insecure_registry=False):
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
DeprecationWarning
)
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
u = self._url("/images/{0}/push", repository)
params = {
'tag': tag
}
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if not self._auth_configs:
self._auth_configs = auth.load_config()
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# Do not fail here if no authentication exists for this specific
# registry as we can have a readonly pull. Just put the header if
# we can.
if authcfg:
headers['X-Registry-Auth'] = auth.encode_header(authcfg)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response)
return self._result(response)
@check_resource
def remove_container(self, container, v=False, link=False, force=False):
params = {'v': v, 'link': link, 'force': force}
res = self._delete(
self._url("/containers/{0}", container), params=params
)
self._raise_for_status(res)
@check_resource
def remove_image(self, image, force=False, noprune=False):
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/{0}", image), params=params)
self._raise_for_status(res)
@check_resource
def rename(self, container, name):
if utils.compare_version('1.17', self._version) < 0:
raise errors.InvalidVersion(
'rename was only introduced in API version 1.17'
)
url = self._url("/containers/{0}/rename", container)
params = {'name': name}
res = self._post(url, params=params)
self._raise_for_status(res)
@check_resource
def resize(self, container, height, width):
params = {'h': height, 'w': width}
url = self._url("/containers/{0}/resize", container)
res = self._post(url, params=params)
self._raise_for_status(res)
@check_resource
def restart(self, container, timeout=10):
params = {'t': timeout}
url = self._url("/containers/{0}/restart", container)
res = self._post(url, params=params)
self._raise_for_status(res)
def search(self, term):
return self._result(
self._get(self._url("/images/search"), params={'term': term}),
True
)
@check_resource
def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
publish_all_ports=None, links=None, privileged=None,
dns=None, dns_search=None, volumes_from=None, network_mode=None,
restart_policy=None, cap_add=None, cap_drop=None, devices=None,
extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
security_opt=None, ulimits=None):
if utils.compare_version('1.10', self._version) < 0:
if dns is not None:
raise errors.InvalidVersion(
'dns is only supported for API version >= 1.10'
)
if volumes_from is not None:
raise errors.InvalidVersion(
'volumes_from is only supported for API version >= 1.10'
)
if utils.compare_version('1.15', self._version) < 0:
if security_opt is not None:
raise errors.InvalidVersion(
'security_opt is only supported for API version >= 1.15'
)
if ipc_mode:
raise errors.InvalidVersion(
'ipc_mode is only supported for API version >= 1.15'
)
if utils.compare_version('1.17', self._version) < 0:
if read_only is not None:
raise errors.InvalidVersion(
'read_only is only supported for API version >= 1.17'
)
if pid_mode is not None:
raise errors.InvalidVersion(
'pid_mode is only supported for API version >= 1.17'
)
if utils.compare_version('1.18', self._version) < 0:
if ulimits is not None:
raise errors.InvalidVersion(
'ulimits is only supported for API version >= 1.18'
)
start_config_kwargs = dict(
binds=binds, port_bindings=port_bindings, lxc_conf=lxc_conf,
publish_all_ports=publish_all_ports, links=links, dns=dns,
privileged=privileged, dns_search=dns_search, cap_add=cap_add,
cap_drop=cap_drop, volumes_from=volumes_from, devices=devices,
network_mode=network_mode, restart_policy=restart_policy,
extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid_mode,
ipc_mode=ipc_mode, security_opt=security_opt, ulimits=ulimits
)
start_config = None
if any(v is not None for v in start_config_kwargs.values()):
if utils.compare_version('1.15', self._version) > 0:
warnings.warn(
'Passing host config parameters in start() is deprecated. '
'Please use host_config in create_container instead!',
DeprecationWarning
)
start_config = self.create_host_config(**start_config_kwargs)
url = self._url("/containers/{0}/start", container)
res = self._post_json(url, data=start_config)
self._raise_for_status(res)
@check_resource
def stats(self, container, decode=None):
if utils.compare_version('1.17', self._version) < 0:
raise errors.InvalidVersion(
'Stats retrieval is not supported in API < 1.17!')
url = self._url("/containers/{0}/stats", container)
return self._stream_helper(self._get(url, stream=True), decode=decode)
@check_resource
def stop(self, container, timeout=10):
params = {'t': timeout}
url = self._url("/containers/{0}/stop", container)
res = self._post(url, params=params,
timeout=(timeout + (self.timeout or 0)))
self._raise_for_status(res)
@check_resource
def tag(self, image, repository, tag=None, force=False):
params = {
'tag': tag,
'repo': repository,
'force': 1 if force else 0
}
url = self._url("/images/{0}/tag", image)
res = self._post(url, params=params)
self._raise_for_status(res)
return res.status_code == 201
@check_resource
def top(self, container):
u = self._url("/containers/{0}/top", container)
return self._result(self._get(u), True)
def version(self, api_version=True):
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
@check_resource
def unpause(self, container):
url = self._url('/containers/{0}/unpause', container)
res = self._post(url)
self._raise_for_status(res)
@check_resource
def wait(self, container, timeout=None):
url = self._url("/containers/{0}/wait", container)
res = self._post(url, timeout=timeout)
self._raise_for_status(res)
json_ = res.json()
if 'StatusCode' in json_:
return json_['StatusCode']
return -1
class AutoVersionClient(Client):
def __init__(self, *args, **kwargs):
if 'version' in kwargs and kwargs['version']:
raise errors.DockerException(
'Can not specify version for AutoVersionClient'
)
kwargs['version'] = 'auto'
super(AutoVersionClient, self).__init__(*args, **kwargs)
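# Usage sketch: AutoVersionClient negotiates the API version with the daemon at
# construction time, so a 'version' keyword argument must not be passed:
#
#     cli = AutoVersionClient(base_url='unix://var/run/docker.sock')
#     print(cli.version())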
|
|
##########################################################################
#
# Copyright 2011 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""D3D retracer generator."""
import sys
from dllretrace import DllRetracer as Retracer
import specs.dxgi
from specs.stdapi import API
from specs.winapi import LPCSTR
from specs.dxgi import dxgi
from specs.d3d10 import d3d10, d3d10_1
from specs.d3d11 import d3d11
from specs.dcomp import dcomp
class D3DRetracer(Retracer):
def retraceApi(self, api):
print('// Swizzling mapping for lock addresses, mapping a (pDeviceContext, pResource, Subresource) -> void *')
print('typedef std::pair< IUnknown *, UINT > SubresourceKey;')
print('static std::map< IUnknown *, std::map< SubresourceKey, void * > > g_Maps;')
print()
self.table_name = 'd3dretrace::dxgi_callbacks'
Retracer.retraceApi(self, api)
createDeviceFunctionNames = [
"D3D10CreateDevice",
"D3D10CreateDeviceAndSwapChain",
"D3D10CreateDevice1",
"D3D10CreateDeviceAndSwapChain1",
"D3D11CreateDevice",
"D3D11CreateDeviceAndSwapChain",
]
def invokeFunction(self, function):
if function.name in self.createDeviceFunctionNames:
# create windows as necessary
if 'pSwapChainDesc' in function.argNames():
print(r' if (pSwapChainDesc) {')
print(r' d3dretrace::createWindowForSwapChain(pSwapChainDesc);')
print(r' }')
# Compensate for the fact we don't trace DXGI object creation
if function.name.startswith('D3D11CreateDevice'):
print(r' if (DriverType == D3D_DRIVER_TYPE_UNKNOWN && !pAdapter) {')
print(r' DriverType = D3D_DRIVER_TYPE_HARDWARE;')
print(r' }')
if function.name.startswith('D3D10CreateDevice'):
# Toggle debugging
print(r' if (retrace::debug >= 2) {')
print(r' Flags |= D3D10_CREATE_DEVICE_DEBUG;')
print(r' } else if (retrace::debug < 0) {')
print(r' Flags &= ~D3D10_CREATE_DEVICE_DEBUG;')
print(r' }')
# D3D10CreateDevice(D3D10_DRIVER_TYPE_REFERENCE) fails with
# DXGI_ERROR_UNSUPPORTED on 64bits.
print(r'#ifdef _WIN64')
print(r' if (DriverType == D3D10_DRIVER_TYPE_REFERENCE) {')
print(r' DriverType = D3D10_DRIVER_TYPE_WARP;')
print(r' }')
print(r'#endif')
# Force driver
self.forceDriver('D3D10_DRIVER_TYPE_HARDWARE')
if function.name.startswith('D3D11CreateDevice'):
# Toggle debugging
print(r' if (retrace::debug >= 2) {')
print(r' Flags |= D3D11_CREATE_DEVICE_DEBUG;')
print(r' } else if (retrace::debug < 0) {')
print(r' Flags &= ~D3D11_CREATE_DEVICE_DEBUG;')
print(r' }')
print(r' if (IsWindows8OrGreater()) {')
print(r' Flags |= D3D11_CREATE_DEVICE_DISABLE_GPU_TIMEOUT;')
print(r' }')
# Force driver
self.forceDriver('D3D_DRIVER_TYPE_UNKNOWN')
Retracer.invokeFunction(self, function)
def doInvokeFunction(self, function):
Retracer.doInvokeFunction(self, function)
# Handle a missing debug layer. While it's possible to detect whether
# the debug layers are present by creating a null device and checking
# the result, it's simpler to retry without the debug flag.
if function.name.startswith('D3D10CreateDevice'):
print(r' if ((_result == E_FAIL || _result == DXGI_ERROR_SDK_COMPONENT_MISSING) && (Flags & D3D10_CREATE_DEVICE_DEBUG)) {')
print(r' retrace::warning(call) << "Direct3D 10.x SDK Debug Layer (d3d10sdklayers.dll) not available, continuing without debug output\n";')
print(r' Flags &= ~D3D10_CREATE_DEVICE_DEBUG;')
Retracer.doInvokeFunction(self, function)
print(r' }')
if function.name.startswith('D3D11CreateDevice'):
print(r' if ((_result == E_FAIL || _result == DXGI_ERROR_SDK_COMPONENT_MISSING) && (Flags & D3D11_CREATE_DEVICE_DEBUG)) {')
print(r' retrace::warning(call) << "Direct3D 11.x SDK Debug Layer (d3d11*sdklayers.dll) not available, continuing without debug output\n";')
print(r' Flags &= ~D3D11_CREATE_DEVICE_DEBUG;')
Retracer.doInvokeFunction(self, function)
print(r' }')
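# For reference, the retry logic emitted above generates C++ along these lines
# (reconstructed from the print statements; exact formatting may differ):
#
#     if ((_result == E_FAIL || _result == DXGI_ERROR_SDK_COMPONENT_MISSING) &&
#             (Flags & D3D11_CREATE_DEVICE_DEBUG)) {
#         retrace::warning(call) << "... SDK Debug Layer not available ...\n";
#         Flags &= ~D3D11_CREATE_DEVICE_DEBUG;
#         // the original call is re-emitted here without the debug flag
#     }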
def handleFailure(self, interface, methodOrFunction):
# Catch when device is removed, and report the reason.
if interface is not None:
print(r' if (_result == DXGI_ERROR_DEVICE_REMOVED) {')
print(r' d3dretrace::deviceRemoved(call, _this);')
print(r' }')
Retracer.handleFailure(self, interface, methodOrFunction)
def forceDriver(self, driverType):
# This can only work when pAdapter is NULL. For non-NULL pAdapter we
# need to override inside the EnumAdapters call below
print(r' ComPtr<IDXGIFactory1> _pFactory;')
print(r' ComPtr<IDXGIAdapter> _pAdapter;')
print(r' if (pAdapter == nullptr && retrace::driver != retrace::DRIVER_DEFAULT) {')
print(r' _result = CreateDXGIFactory1(IID_IDXGIFactory1, &_pFactory);')
print(r' assert(SUCCEEDED(_result));')
print(r' _result = d3dretrace::createAdapter(_pFactory.Get(), IID_IDXGIAdapter1, &_pAdapter);')
print(r' pAdapter = _pAdapter.Get();')
print(r' DriverType = %s;' % driverType)
print(r' Software = nullptr;')
print(r' }')
print(r' if (Software) {')
print(r' Software = LoadLibraryA("d3d10warp.dll");')
print(r' assert(Software != nullptr);')
print(r' }')
def doInvokeInterfaceMethod(self, interface, method):
if interface.name.startswith('IDXGIAdapter') and method.name == 'EnumOutputs':
print(r' if (Output != 0) {')
print(r' retrace::warning(call) << "ignoring output " << Output << "\n";')
print(r' Output = 0;')
print(r' }')
# GPU counters are vendor specific and likely to fail, so use a
# timestamp query instead, which is guaranteed to succeed
if method.name == 'CreateCounter':
if interface.name.startswith('ID3D10'):
print(r' D3D10_QUERY_DESC _queryDesc;')
print(r' _queryDesc.Query = D3D10_QUERY_TIMESTAMP;')
print(r' _queryDesc.MiscFlags = 0;')
print(r' _result = _this->CreateQuery(&_queryDesc, reinterpret_cast<ID3D10Query **>(ppCounter));')
return
if interface.name.startswith('ID3D11'):
print(r' D3D11_QUERY_DESC _queryDesc;')
print(r' _queryDesc.Query = D3D11_QUERY_TIMESTAMP;')
print(r' _queryDesc.MiscFlags = 0;')
print(r' _result = _this->CreateQuery(&_queryDesc, reinterpret_cast<ID3D11Query **>(ppCounter));')
return
Retracer.doInvokeInterfaceMethod(self, interface, method)
# Force driver
if interface.name.startswith('IDXGIFactory') and method.name.startswith('EnumAdapters'):
print(r' if (Adapter != 0) {')
print(r' retrace::warning(call) << "ignoring non-default adapter " << Adapter << "\n";')
print(r' Adapter = 0;')
print(r' }')
print(r' if (retrace::driver != retrace::DRIVER_DEFAULT) {')
print(r' _result = d3dretrace::createAdapter(_this, IID_IDXGIAdapter1, (void **)ppAdapter);')
print(r' } else {')
Retracer.doInvokeInterfaceMethod(self, interface, method)
print(r' }')
return
if interface.name.startswith('IDXGIFactory') and method.name.startswith('EnumAdapterByLuid'):
print(r' retrace::warning(call) << "ignoring adapter LUID, returning adapter 0\n";')
print(r' if (retrace::driver != retrace::DRIVER_DEFAULT) {')
print(r' _result = d3dretrace::createAdapter(_this, riid, ppvAdapter);')
print(r' } else {')
print(r' if (ppvAdapter)')
print(r' *ppvAdapter = nullptr;')
print(r' IDXGIAdapter1 *_temp_adapter;')
print(r' _result = _this->EnumAdapters1(0, &_temp_adapter);')
print(r' if (SUCCEEDED(_result)) {')
print(r' _result = _temp_adapter->QueryInterface(riid, ppvAdapter);')
print(r' _temp_adapter->Release();')
print(r' }')
print(r' }')
return
if interface.name.startswith('IDXGIFactory') and method.name.startswith('EnumAdapterByGpuPreference'):
print(r' if (Adapter != 0) {')
print(r' retrace::warning(call) << "ignoring non-default adapter " << Adapter << "\n";')
print(r' Adapter = 0;')
print(r' }')
print(r' if (retrace::driver != retrace::DRIVER_DEFAULT) {')
print(r' _result = d3dretrace::createAdapter(_this, riid, ppvAdapter);')
print(r' } else {')
Retracer.doInvokeInterfaceMethod(self, interface, method)
print(r' }')
return
if interface.name.startswith('IDXGIFactory') and method.name == 'CreateSoftwareAdapter':
print(r' const char *szSoftware = NULL;')
print(r' switch (retrace::driver) {')
print(r' case retrace::DRIVER_REFERENCE:')
print(r' szSoftware = "d3d11ref.dll";')
print(r' break;')
print(r' case retrace::DRIVER_MODULE:')
print(r' szSoftware = retrace::driverModule;')
print(r' break;')
print(r' case retrace::DRIVER_SOFTWARE:')
print(r' default:')
print(r' szSoftware = "d3d10warp.dll";')
print(r' break;')
print(r' }')
print(r' Module = LoadLibraryA(szSoftware);')
print(r' if (!Module) {')
print(r' retrace::warning(call) << "failed to load " << szSoftware << "\n";')
print(r' }')
Retracer.doInvokeInterfaceMethod(self, interface, method)
# Keep retrying ID3D11VideoContext::DecoderBeginFrame when returns E_PENDING
if interface.name == 'ID3D11VideoContext' and method.name == 'DecoderBeginFrame':
print(r' while (_result == D3DERR_WASSTILLDRAWING || _result == E_PENDING) {')
print(r' Sleep(1);')
Retracer.doInvokeInterfaceMethod(self, interface, method)
print(r' }')
def invokeInterfaceMethod(self, interface, method):
# keep track of the last used device for state dumping
if interface.name in ('ID3D10Device', 'ID3D10Device1'):
if method.name == 'Release':
print(r' if (call.ret->toUInt() == 0) {')
print(r' d3d10Dumper.unbindDevice(_this);')
print(r' }')
else:
print(r' d3d10Dumper.bindDevice(_this);')
if interface.name.startswith('ID3D11DeviceContext'):
if method.name == 'Release':
print(r' if (call.ret->toUInt() == 0) {')
print(r' d3d11Dumper.unbindDevice(_this);')
print(r' }')
else:
print(r' d3d11Dumper.bindDevice(_this);')
# intercept private interfaces
if method.name == 'QueryInterface':
print(r' if (!d3dretrace::overrideQueryInterface(_this, riid, ppvObj, &_result)) {')
Retracer.invokeInterfaceMethod(self, interface, method)
print(r' }')
return
# create windows as necessary
if method.name == 'CreateSwapChain':
print(r' d3dretrace::createWindowForSwapChain(pDesc);')
if method.name == 'CreateSwapChainForHwnd':
print(r' hWnd = d3dretrace::createWindow(pDesc->Width, pDesc->Height);')
print(r' // DXGI_SCALING_NONE is only supported on Win8 and beyond')
print(r' if (pDesc->Scaling == DXGI_SCALING_NONE && !IsWindows8OrGreater()) {')
print(r' pDesc->Scaling = DXGI_SCALING_STRETCH;')
print(r' }')
if method.name == 'CreateSwapChainForComposition':
print(r' HWND hWnd = d3dretrace::createWindow(pDesc->Width, pDesc->Height);')
print(r' _result = _this->CreateSwapChainForHwnd(pDevice, hWnd, pDesc, NULL, pRestrictToOutput, ppSwapChain);')
self.checkResult(interface, method)
return
if method.name == 'CreateSwapChainForCompositionSurfaceHandle':
print(r' ComPtr<IDXGIFactory2> pFactory;')
print(r' _result = _this->QueryInterface(IID_IDXGIFactory2, &pFactory);')
print(r' assert(SUCCEEDED(_result));')
print(r' HWND hWnd = d3dretrace::createWindow(pDesc->Width, pDesc->Height);')
print(r' pDesc->Flags &= ~DXGI_SWAP_CHAIN_FLAG_FULLSCREEN_VIDEO;')
print(r' _result = pFactory->CreateSwapChainForHwnd(pDevice, hWnd, pDesc, NULL, pRestrictToOutput, ppSwapChain);')
self.checkResult(interface, method)
return
if method.name == 'ResizeBuffers':
print(r' SwapChainFlags &= ~DXGI_SWAP_CHAIN_FLAG_FULLSCREEN_VIDEO;')
if method.name == 'CreateTargetForHwnd':
print(r' hwnd = d3dretrace::createWindow(1024, 768);')
if method.name == 'SetFullscreenState':
print(r' if (retrace::forceWindowed) {')
print(r' DXGI_SWAP_CHAIN_DESC Desc;')
print(r' _this->GetDesc(&Desc);')
print(r' if (Desc.BufferDesc.Format != DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM) {')
print(r' Fullscreen = FALSE;')
print(r' pTarget = nullptr;')
print(r' }')
print(r' }')
# notify frame has been completed
if interface.name.startswith('IDXGISwapChain') and method.name.startswith('Present'):
# Reset _DO_NOT_WAIT flags. Otherwise they may fail, and we have no
# way to cope with it (other than retry).
print(r' Flags &= ~DXGI_PRESENT_DO_NOT_WAIT;')
if interface.name.startswith('IDXGISwapChainDWM'):
print(r' ComPtr<IDXGISwapChain> pSwapChain;')
print(r' if (SUCCEEDED(_this->QueryInterface(IID_IDXGISwapChain, &pSwapChain))) {')
print(r' dxgiDumper.bindDevice(pSwapChain.Get());')
print(r' } else {')
print(r' assert(0);')
print(r' }')
else:
print(r' dxgiDumper.bindDevice(_this);')
print(r' if ((Flags & DXGI_PRESENT_TEST) == 0) {')
print(r' retrace::frameComplete(call);')
print(r' }')
if 'pSharedResource' in method.argNames():
print(r' if (pSharedResource) {')
print(r' retrace::warning(call) << "shared surfaces unsupported\n";')
print(r' pSharedResource = NULL;')
print(r' }')
if interface.name.startswith('ID3D10Device') and method.name.startswith('OpenSharedResource'):
print(r' retrace::warning(call) << "replacing shared resource with checker pattern\n";')
print(r' _result = d3dretrace::createSharedResource(_this, ReturnedInterface, ppResource);')
self.checkResult(interface, method)
return
if interface.name.startswith('ID3D11Device') and method.name == 'OpenSharedResource':
# Some applications (e.g., video playing in IE11) create shared resources within the same process.
# TODO: Generalize to other OpenSharedResource variants
print(r' retrace::map<HANDLE>::const_iterator it = _shared_handle_map.find(hResource);')
print(r' if (it == _shared_handle_map.end()) {')
print(r' retrace::warning(call) << "replacing shared resource with checker pattern\n";')
print(r' _result = d3dretrace::createSharedResource(_this, ReturnedInterface, ppResource);')
self.checkResult(interface, method)
print(r' } else {')
print(r' hResource = it->second;')
Retracer.invokeInterfaceMethod(self, interface, method)
print(r' }')
return
if interface.name.startswith('ID3D11Device') and method.name.startswith('OpenSharedResource'):
print(r' retrace::warning(call) << "replacing shared resource with checker pattern\n";')
print(r' _result = d3dretrace::createSharedResource(_this, ReturnedInterface, ppResource);')
if method.name == 'OpenSharedResourceByName':
print(r' (void)lpName;')
print(r' (void)dwDesiredAccess;')
else:
print(r' (void)hResource;')
self.checkResult(interface, method)
return
if method.name == 'Map':
# Reset _DO_NOT_WAIT flags. Otherwise they may fail, and we have no
# way to cope with it (other than retry).
mapFlagsArg = method.getArgByName('MapFlags')
for flag in mapFlagsArg.type.values:
if flag.endswith('_MAP_FLAG_DO_NOT_WAIT'):
print(r' MapFlags &= ~%s;' % flag)
if method.name.startswith('UpdateSubresource'):
# The D3D10 debug layer is buggy (or at least inconsistent with the
# runtime), as it seems to estimate and enforce the data size based on the
# SrcDepthPitch, even for non 3D textures, but in some traces
# SrcDepthPitch is garbage for non 3D textures.
# XXX: It also seems to expect padding bytes at the end of the last
# row, but we never record (or allocate) those...
print(r' if (retrace::debug >= 2 && pDstBox && pDstBox->front == 0 && pDstBox->back == 1) {')
print(r' SrcDepthPitch = 0;')
print(r' }')
if method.name == 'SetGammaControl':
# This method is only supported while in full-screen mode
print(r' if (retrace::forceWindowed) {')
print(r' return;')
print(r' }')
if method.name == 'GetData':
print(r' pData = DataSize ? _allocator.alloc(DataSize) : nullptr;')
print(r' do {')
self.doInvokeInterfaceMethod(interface, method)
print(r' GetDataFlags = 0; // Prevent infinite loop')
print(r' } while (_result == S_FALSE);')
self.checkResult(interface, method)
print(r' return;')
# We don't capture multiple processes, so don't wait on keyed mutexes to
# avoid deadlocks. However it's important to try honouring the
# IDXGIKeyedMutex interfaces so that single processes using multiple
# contexts work reliably, by ensuring pending commands get flushed.
if method.name == 'AcquireSync':
print(r' dwMilliseconds = 0;')
Retracer.invokeInterfaceMethod(self, interface, method)
# process events after presents
if interface.name.startswith('IDXGISwapChain') and method.name.startswith('Present'):
print(r' d3dretrace::processEvents();')
if method.name in ('Map', 'Unmap'):
if interface.name.startswith('ID3D11DeviceContext'):
print(' void * & _pbData = g_Maps[_this][SubresourceKey(pResource, Subresource)];')
else:
subresourceArg = method.getArgByName('Subresource')
if subresourceArg is None:
print(' UINT Subresource = 0;')
print(' void * & _pbData = g_Maps[0][SubresourceKey(_this, Subresource)];')
if method.name == 'Map':
print(' _MAP_DESC _MapDesc;')
print(' _getMapDesc(_this, %s, _MapDesc);' % ', '.join(method.argNames()))
print(' size_t _MappedSize = _MapDesc.Size;')
print(' if (_MapDesc.Size) {')
print(' _pbData = _MapDesc.pData;')
if interface.name.startswith('ID3D11DeviceContext'):
# Prevent false warnings on 1D and 2D resources, since the
# pitches are often junk there...
print(' _normalizeMap(pResource, pMappedResource);')
else:
print(' _pbData = _MapDesc.pData;')
print(' } else {')
print(' return;')
print(' }')
if method.name == 'Unmap':
print(' if (_pbData) {')
print(' retrace::delRegionByPointer(_pbData);')
print(' _pbData = 0;')
print(' }')
if interface.name.startswith('ID3D11VideoContext'):
if method.name == 'GetDecoderBuffer':
print(' if (*ppBuffer && *pBufferSize) {')
print(' g_Maps[nullptr][SubresourceKey(_this, Type)] = *ppBuffer;')
print(' }')
if method.name == 'ReleaseDecoderBuffer':
print(' SubresourceKey _mappingKey(_this, Type);')
print(' void *_pBuffer = g_Maps[nullptr][_mappingKey];')
print(' if (_pBuffer) {')
print(' retrace::delRegionByPointer(_pBuffer);')
print(' g_Maps[nullptr][_mappingKey] = 0;')
print(' }')
# Attach shader byte code for lookup
if 'pShaderBytecode' in method.argNames():
ppShader = method.args[-1]
assert ppShader.output
print(r' if (retrace::dumpingState && SUCCEEDED(_result)) {')
print(r' (*%s)->SetPrivateData(d3dstate::GUID_D3DSTATE, BytecodeLength, pShaderBytecode);' % ppShader.name)
print(r' }')
if method.name == 'CreateBuffer':
ppBuffer = method.args[-1]
print(r' if (retrace::dumpingState && SUCCEEDED(_result)) {')
print(r' char label[32];')
print(r' _snprintf(label, sizeof label, "0x%%llx", call.arg(%u).toArray()->values[0]->toUIntPtr());' % ppBuffer.index)
print(r' (*%s)->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(label)+1, label);' % ppBuffer.name)
print(r' }')
def retraceInterfaceMethodBody(self, interface, method):
Retracer.retraceInterfaceMethodBody(self, interface, method)
# Add pitch swizzling information to the region
if method.name == 'Map' and interface.name not in ('ID3D10Buffer', 'ID3D10Texture1D'):
if interface.name.startswith('ID3D11DeviceContext'):
outArg = method.getArgByName('pMappedResource')
memberNames = ('pData', 'RowPitch', 'DepthPitch')
elif interface.name.startswith('ID3D10'):
outArg = method.args[-1]
memberNames = ('pData', 'RowPitch', 'DepthPitch')
elif interface.name == 'IDXGISurface':
outArg = method.getArgByName('pLockedRect')
memberNames = ('pBits', 'Pitch', None)
else:
raise NotImplementedError
struct = outArg.type.type
dataMemberName, rowPitchMemberName, depthPitchMemberName = memberNames
dataMemberIndex = struct.getMemberByName(dataMemberName)
rowPitchMemberIndex = struct.getMemberByName(rowPitchMemberName)
print(r' if (_pbData && %s->%s != 0) {' % (outArg.name, rowPitchMemberName))
print(r' const trace::Array *_%s = call.arg(%u).toArray();' % (outArg.name, outArg.index))
print(r' if (%s) {' % outArg.name)
print(r' const trace::Struct *_struct = _%s->values[0]->toStruct();' % (outArg.name))
print(r' if (_struct) {')
print(r' unsigned long long traceAddress = _struct->members[%u]->toUIntPtr();' % dataMemberIndex)
print(r' int traceRowPitch = _struct->members[%u]->toSInt();' % rowPitchMemberIndex)
print(r' int realRowPitch = %s->%s;' % (outArg.name, rowPitchMemberName))
print(r' if (realRowPitch && traceRowPitch != realRowPitch) {')
print(r' retrace::setRegionPitch(traceAddress, 2, traceRowPitch, realRowPitch);')
print(r' }')
try:
depthPitchMemberIndex = struct.getMemberByName(depthPitchMemberName)
except ValueError:
assert len(struct.members) < 3
pass
else:
assert depthPitchMemberName == 'DepthPitch'
print(r' if (%s->DepthPitch) {' % outArg.name)
print(r' retrace::checkMismatch(call, "DepthPitch", _struct->members[%u], %s->DepthPitch);' % (depthPitchMemberIndex, outArg.name))
print(r' }')
print(r' }')
print(r' }')
print(r' }')
def checkResult(self, interface, methodOrFunction):
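# AcquireSync may return success codes other than S_OK (e.g. wait timeouts, which are
# likely since dwMilliseconds is forced to 0 above), so anything but S_OK is flagged here.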
if interface is not None and interface.name == 'IDXGIKeyedMutex' and methodOrFunction.name == 'AcquireSync':
print(r' if (_result != S_OK) {')
print(r' retrace::failed(call, _result);')
self.handleFailure(interface, methodOrFunction)
print(r' }')
return
return Retracer.checkResult(self, interface, methodOrFunction)
def extractArg(self, function, arg, arg_type, lvalue, rvalue):
# Set object names
if function.name == 'SetPrivateData' and arg.name == 'pData':
iid = function.args[0].name
print(r' if (%s != WKPDID_D3DDebugObjectName) {' % iid)
print(r' return;')
print(r' }')
# Interpret argument as string
Retracer.extractArg(self, function, arg, LPCSTR, lvalue, rvalue)
print(r' if (!pData) {')
print(r' return;')
print(r' }')
print(r' assert(DataSize >= strlen((const char *)pData));')
print(r' // Some applications include the trailing zero terminator in the data')
print(r' DataSize = strlen((const char *)pData);')
return
Retracer.extractArg(self, function, arg, arg_type, lvalue, rvalue)
def main():
print(r'#define INITGUID')
print()
print(r'#include <string.h>')
print()
print(r'#include <iostream>')
print()
print(r'#include "d3dretrace.hpp"')
print(r'#include "os_version.hpp"')
print()
print(r'#include "d3dretrace_dxgi.hpp"')
print(r'#include "d3d10imports.hpp"')
print(r'#include "d3d10size.hpp"')
print(r'#include "d3d10state.hpp"')
print(r'#include "d3d11imports.hpp"')
print(r'#include "d3d11size.hpp"')
print(r'#include "dcompimports.hpp"')
print(r'#include "d3dstate.hpp"')
print(r'#include "d3d9imports.hpp" // D3DERR_WASSTILLDRAWING')
print()
print('''static d3dretrace::D3DDumper<IDXGISwapChain> dxgiDumper;''')
print('''static d3dretrace::D3DDumper<ID3D10Device> d3d10Dumper;''')
print('''static d3dretrace::D3DDumper<ID3D11DeviceContext> d3d11Dumper;''')
print()
api = API()
api.addModule(dxgi)
api.addModule(d3d10)
api.addModule(d3d10_1)
api.addModule(d3d11)
api.addModule(dcomp)
retracer = D3DRetracer()
retracer.retraceApi(api)
if __name__ == '__main__':
main()
|
|
import unittest
import string
import sys
import os
import tempfile
import zipfile
import shutil
import copy
from pynhost import ruleparser
from pynhost import matching
class TestRuleMatching(unittest.TestCase):
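"""Tests for pynhost rule matching.
The rules below exercise the grammar elements handled by ruleparser/matching:
optional groups [..], alternatives (a | b), special tokens such as <num>, <any>,
<end> and homophone tokens (<hom_*>), plus repetition counts like <2>, <1-> and <0-3>.
"""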
@classmethod
def setUpClass(cls):
pass
def test_overflow1(self):
rule = ruleparser.Rule('hello')
words = 'hello world'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello'])
self.assertEqual(list(matching.get_rule_match(rule, words).remaining_words), ['world'])
def test_overflow2(self):
rule = ruleparser.Rule('hello [there] (world | universe)')
words = 'hello there world how are you'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'there', 'world'])
self.assertEqual(list(matching.get_rule_match(rule, words).remaining_words), ['how', 'are', 'you'])
def test_basic1(self):
rule = ruleparser.Rule('hello world')
words = 'hello world'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'world'])
def test_basic2(self):
rule = ruleparser.Rule('hello [world]')
words = 'hello world'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'world'])
def test_basic3(self):
rule = ruleparser.Rule('hello [world]')
words = 'hello'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello'])
def test_basic4(self):
rule = ruleparser.Rule('hello [there] world')
words = 'hello world'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'world'])
def test_basic5(self):
rule = ruleparser.Rule('hello [there] world')
words = 'hello there world'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'there', 'world'])
def test_basic6(self):
rule = ruleparser.Rule('hello [there] world')
words = 'hello there'.split()
self.assertIsNone(matching.get_rule_match(rule, words))
def test_basic7(self):
rule = ruleparser.Rule('hello [there] [world]')
words = 'hello there'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'there'])
def test_basic8(self):
rule = ruleparser.Rule('hello [there world]')
words = 'hello there world'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'there', 'world'])
def test_basic9(self):
rule = ruleparser.Rule('[there world]')
words = 'there world'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['there', 'world'])
def test_list1(self):
rule = ruleparser.Rule('(hello | goodbye)')
words = 'hello'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello'])
def test_list2(self):
rule = ruleparser.Rule('(hello | goodbye)')
words = 'goodbye'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['goodbye'])
def test_list3(self):
rule = ruleparser.Rule('(hello | goodbye)')
words = 'hello goodbye'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello'])
self.assertEqual(list(matching.get_rule_match(rule, words).remaining_words), ['goodbye'])
def test_list4(self):
rule = ruleparser.Rule('hello (world | [enormous] universe )')
words = 'hello world'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'world'])
def test_list5(self):
rule = ruleparser.Rule('hello (world | [enormous] universe )')
words = 'hello universe'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'universe'])
def test_list6(self):
rule = ruleparser.Rule('hello (world | [enormous] universe )')
words = 'hello enormous universe'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'enormous', 'universe'])
def test_list7(self):
rule = ruleparser.Rule('hello (world | [enormous] universe )')
words = 'hello world enormous universe'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'world'])
self.assertEqual(list(matching.get_rule_match(rule, words).remaining_words), ['enormous', 'universe'])
def test_list8(self):
rule = ruleparser.Rule('(hello world)')
words = 'hello world'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'world'])
def test_num0(self):
rule = ruleparser.Rule('<num>')
words = 'four'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['4'])
def test_num1(self):
rule = ruleparser.Rule('<num>')
words = '4'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['4'])
def test_num2(self):
rule = ruleparser.Rule('<num>')
words = '-4.21'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['-4.21'])
def test_num3(self):
rule = ruleparser.Rule('<num>')
words = 'e'.split()
self.assertIsNone(matching.get_rule_match(rule, words))
def test_num4(self):
rule = ruleparser.Rule('range <num>')
words = 'range 83'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['range', '83'])
def test_num5(self):
rule = ruleparser.Rule('range <num>[through <num>]')
words = 'range 83'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['range', '83'])
def test_num6(self):
rule = ruleparser.Rule('range <num>[through <num>]')
words = 'range 83 through'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['range', '83'])
self.assertEqual(list(matching.get_rule_match(rule, words).remaining_words), ['through'])
def test_num10(self):
rule = ruleparser.Rule('range banana [through waffle]')
words = 'range banana through'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['range', 'banana'])
self.assertEqual(list(matching.get_rule_match(rule, words).remaining_words), ['through'])
def test_num7(self):
rule = ruleparser.Rule('range <num>[through <num>]')
words = 'range 83 through -100'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['range', '83', 'through', '-100'])
def test_num8(self):
rule = ruleparser.Rule('range <num>[through <num>[step <num>]]')
words = 'range 83 through -100 step -4'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['range', '83', 'through', '-100', 'step', '-4'])
def test_num9(self):
rule = ruleparser.Rule('<num>')
words = 'too'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['2'])
def test_any1(self):
rule = ruleparser.Rule('<any>')
words = 'yowza'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['yowza'])
def test_any2(self):
rule = ruleparser.Rule('<any>')
words = 'yowza wowza'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['yowza'])
self.assertEqual(list(matching.get_rule_match(rule, words).remaining_words), ['wowza'])
def test_any3(self):
rule = ruleparser.Rule('hello <any>')
words = 'hello daisy'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'daisy'])
def test_any4(self):
rule = ruleparser.Rule('hello <any> <3>')
words = 'hello dear omnipotent leader'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'dear', 'omnipotent', 'leader'])
def test_any5(self):
rule = ruleparser.Rule('hello <any> <3>')
words = 'hello dear leader'.split()
self.assertIsNone(matching.get_rule_match(rule, words))
def test_any6(self):
rule = ruleparser.Rule('hello <any> <3->')
words = 'hello dear leader how are you today'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'dear', 'leader', 'how', 'are', 'you', 'today'])
def test_any7(self):
rule = ruleparser.Rule('hello <any> <0-3> are you today')
words = 'hello dear leader how are you today'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'dear', 'leader', 'how', 'are', 'you', 'today'])
def test_any8(self):
rule = ruleparser.Rule('hello <any> <0-3>')
words = 'hello dear leader'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'dear', 'leader'])
def test_any9(self):
rule = ruleparser.Rule('word <any> <1->')
words = 'word'.split()
self.assertIsNone(matching.get_rule_match(rule, words))
def test_any10(self):
rule = ruleparser.Rule('word <any> <0-1>')
words = 'word'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['word'])
def test_any11(self):
rule = ruleparser.Rule('select [(blue | fish) whale]')
words = 'select fish'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['select'])
self.assertEqual(list(matching.get_rule_match(rule, words).remaining_words), ['fish'])
def test_homophone1(self):
rule = ruleparser.Rule('hello <hom_line>')
words = 'hello line'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'line'])
def test_homophone2(self):
rule = ruleparser.Rule('hello <hom_line>')
words = 'hello wine'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'line'])
def test_homophone3(self):
rule = ruleparser.Rule('hello <hom_line>')
words = 'hello why n'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'line'])
def test_homophone_not_in_list1(self):
rule = ruleparser.Rule('hello <hom_phone>')
words = 'hello phone'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['hello', 'phone'])
def test_homophone_not_in_list2(self):
rule = ruleparser.Rule('hello <hom_phone>')
words = 'hello bone'.split()
self.assertIsNone(matching.get_rule_match(rule, words))
def test_mix1(self):
rule = ruleparser.Rule('(bend cat | cat)')
words = 'cat'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['cat'])
def test_mix2(self):
rule = ruleparser.Rule('[<any>] test')
words = 'cat'.split()
self.assertIsNone(matching.get_rule_match(rule, words))
def test_mix3(self):
rule = ruleparser.Rule('(camel | score | title | upper) <any> <1->')
words = 'upper hello world'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['upper', 'hello', 'world'])
def test_mix4(self):
rule = ruleparser.Rule('(test | outer hello | outer)')
words = 'outer'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['outer'])
def test_mix5(self):
rule = ruleparser.Rule('<hom_poke> <num>')
words = 'poke 12'.split()
self.assertEqual(list(matching.get_rule_match(rule, words).matched_words), ['poke', '12'])
def test_end1(self):
rule = ruleparser.Rule('hello world <end>')
words = 'hello world'.split()
rule_match = matching.get_rule_match(rule, words)
self.assertEqual(list(rule_match.matched_words), ['hello', 'world'])
def test_end2(self):
rule = ruleparser.Rule('hello world <end>')
words = 'hello world goodbye'.split()
rule_match = matching.get_rule_match(rule, words)
self.assertIsNone(rule_match)
def test_wildcard1(self):
rule = ruleparser.Rule('<any>')
words = 'hello'.split()
rule_match = matching.get_rule_match(rule, words)
self.assertEqual(list(rule_match.matched_words), ['hello'])
def test_wildcard2(self):
rule = ruleparser.Rule('hello <any> world')
words = 'hello large world'.split()
rule_match = matching.get_rule_match(rule, words)
self.assertEqual(list(rule_match.matched_words), ['hello', 'large', 'world'])
def test_repetition1(self):
rule = ruleparser.Rule('hello <any> <2>')
words = 'hello large world'.split()
rule_match = matching.get_rule_match(rule, words)
self.assertEqual(list(rule_match.matched_words), ['hello', 'large', 'world'])
def test_repetition2(self):
rule = ruleparser.Rule('hello <any> <2->')
words = 'hello large world'.split()
rule_match = matching.get_rule_match(rule, words)
self.assertEqual(list(rule_match.matched_words), ['hello', 'large', 'world'])
def test_repetition3(self):
rule = ruleparser.Rule('hello large <any> <0-2> world')
words = 'hello large world'.split()
rule_match = matching.get_rule_match(rule, words)
self.assertEqual(list(rule_match.matched_words), ['hello', 'large', 'world'])
def test_repetition4(self):
rule = ruleparser.Rule('<hom_line> this is a test')
words = 'line this is a test'.split()
rule_match = matching.get_rule_match(rule, words)
self.assertEqual(list(rule_match.matched_words), ['line', 'this', 'is', 'a', 'test'])
def test_num_range1(self):
rule = ruleparser.Rule('<num_44>')
words = '32'.split()
rule_match = matching.get_rule_match(rule, words)
self.assertEqual(list(rule_match.matched_words), ['32'])
def test_num_range2(self):
rule = ruleparser.Rule('<num_5_44>')
words = 'six'.split()
rule_match = matching.get_rule_match(rule, words)
self.assertEqual(list(rule_match.matched_words), ['6'])
def test_num_range3(self):
rule = ruleparser.Rule('<num_5_44>')
words = '44'.split()
self.assertIsNone(matching.get_rule_match(rule, words))
if __name__ == '__main__':
unittest.main()
|
|
# Scramble Solver v0.1
# ====================
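# Rough flow: read 16 letters, build a 4x4 board, let solver.Worker walk letter
# chains from every cell against dictionary.txt, bucket found words by length into
# big/med/small files, then concatenate them into solution.txt.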
import solver
import os
import time
import csv
from sys import argv
LOG_STATS = True
class StatGetter:
def __init__(self):
self.sourcedir = os.getcwd()
self.datadir = self.sourcedir + "/solutiondata/"
os.chdir(self.datadir)
self.statfile = open('stats.csv', 'a')
self.csv_writer = csv.writer(self.statfile)
os.chdir(self.sourcedir)
def logstats(self, data):
self.csv_writer.writerow(data)
def close(self):
self.statfile.close()
class SolutionFile:
def __init__(self):
self.makefiles()
def makefiles(self):
self.maindir = os.getcwd()
self.datadir = self.maindir + "/solutiondata/"
os.chdir(self.datadir)
#create output files
self.wordsbig = file('words-big.txt' , 'w')
self.wordsmed = file('words-med.txt', 'w')
self.wordssml = file('words-sml.txt', 'w')
files = os.listdir(self.datadir)
for f in files:
if f == 'solution.txt':
os.remove(self.datadir + "solution.txt")
print "Old solution file removed"
# change back to src directory
os.chdir(self.maindir)
def writedata(self, data):
self.solutionfile.write(data)
# Functions to write found words to respective files based on size
# NOTE: probably want to eventually move this all to a post processing step
# that can do a finer sort, collect stats, randomize, etc.
def writebig(self, data):
self.wordsbig.write(data)
self.wordsbig.write('\n')
def writemed(self, data):
self.wordsmed.write(data)
self.wordsmed.write('\n')
def writesml(self, data):
self.wordssml.write(data)
self.wordssml.write('\n')
def catwords(self):
# close and reopen output files in read mode & create solutionfile
self.close()
os.chdir(self.datadir)
self.solutionfile = file('solution.txt', 'w')
self.wordsbig = file('words-big.txt' , 'r')
self.wordsmed = file('words-med.txt', 'r')
self.wordssml = file('words-sml.txt', 'r')
self.wordsbig.seek(0, 2)
bigend = self.wordsbig.tell()
self.wordsbig.seek(0)
self.wordsmed.seek(0, 2)
medend = self.wordsmed.tell()
self.wordsmed.seek(0)
self.wordssml.seek(0, 2)
smlend = self.wordssml.tell()
self.wordssml.seek(0)
# readline() keeps the trailing newline, which writedata() relies on, so the
# lines are passed through unchanged.
while self.wordsbig.tell() < bigend:
data = self.wordsbig.readline()
self.writedata(data)
while self.wordsmed.tell() < medend:
data = self.wordsmed.readline()
self.writedata(data)
while self.wordssml.tell() < smlend:
data = self.wordssml.readline()
self.writedata(data)
self.close()
self.solutionfile.close()
# let's remove the intermediate files here
try:
os.remove('words-big.txt')
os.remove('words-med.txt')
os.remove('words-sml.txt')
print "[+] All files succesfully closed"
except:
print "[-] Intermediate file delete FAILED!"
# Close all output files in one clean shot
def close(self):
self.wordsbig.close()
self.wordsmed.close()
self.wordssml.close()
class WordDictionary:
def __init__(self):
self.dictfile = file('dictionary.txt', 'r')
def getword(self):
word = self.dictfile.readline()
return word.rstrip('\n')
def jump(self, index):
self.dictfile.seek(index)
def close(self):
self.dictfile.close()
def get_input():
print ""
input_letters = raw_input("Enter Letters: ")
input_letters = input_letters.upper()
return input_letters
def check_letters(letter_string):
if len(letter_string) != 16:
return False
else:
return True
def make_game_array(input_letters):
game = []
count = 0
for i in range(4):
row = []
for j in range(4):
if input_letters[count] == 'Q':
row.append('QU')
count += 1
else:
row.append(input_letters[count])
count = count + 1
game.append(row)
return game
def print_game(game_array):
print ""
print " ------------------"
for row in game_array:
print row
print " ------------------"
print ""
#-----
class GameEngine:
def __init__(self, inputstr=None):
if inputstr != None:
self.user_input = inputstr.lower()
else:
self.user_input = get_input()
if check_letters(self.user_input) != True:
print "[-] Input Error!"
exit()
else:
self.game = make_game_array(self.user_input)
print "[+] Game array created!"
self.dictionary = WordDictionary()
print "[+] Dictionary Loaded!"
self.solution = SolutionFile()
print "[+] Solution file loaded!"
if LOG_STATS:
self.stats = StatGetter()
print "[+] Stats file loaded!"
print_game(self.game)
self.run()
def run(self):
self.worker = solver.Worker(self.dictionary, self.game)
print "[+] Scramble solver starting!"
for i in range(4):
for j in range(4):
time_start = time.time()
work_coord = (i, j)
work_letter = self.game[i][j]
work = solver.ChainRoot(work_letter, work_coord)
self.worker.work.append(work)
self.worker.process_work()
n = (i * 4) + j + 1
progress = (n / 16.0) * 100.0
outstr = "[+] %3.1f" % progress
outstr += "%"
outstr += " done"
time_done = time.time()
time_elapsed = time_done - time_start
print '\n' + outstr
print "Found %s Words" % len(self.worker.found_words)
print "Cell time: %4.2f" % time_elapsed
print "%s chains processed\n" % self.worker.workcount
if LOG_STATS:
self.stats.logstats([n, work_letter,
len(self.worker.found_words), self.worker.workcount,
time_elapsed])
big_count = 0
med_count = 0
sml_count = 0
for chain in self.worker.found_words:
data = chain[1]
datastr = ""
wordlength = 0
for c in data:
datastr = datastr + str(c[0]) + ', ' + str(c[1]) + '-'
wordlength += 1
if wordlength > 6:
self.solution.writebig(datastr)
big_count += 1
elif wordlength > 3:
self.solution.writemed(datastr)
med_count += 1
else:
self.solution.writesml(datastr)
sml_count += 1
print "Found %s big words, %s medium words, and %s small words" % (big_count, med_count, sml_count)
print "[+] Finalizing..."
self.solution.catwords()
self.solution.close()
self.dictionary.close()
if LOG_STATS:
self.stats.close()
print "[+] All solution files successfully written!"
print "Finished!"
if __name__=='__main__':
if len(argv) > 1:
newgame = GameEngine(inputstr=argv[1])
else:
newgame = GameEngine()
|
|
# NEEDS FIXING
# -*- coding: utf-8 -*-
'''
fantastic Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,hashlib,random,string,json,base64,sys
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
from resources.lib.modules import jsunfuck
CODE = '''def retA():
class Infix:
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
def my_add(x, y):
try: return x + y
except Exception: return str(x) + str(y)
x = Infix(my_add)
return %s
param = retA()'''
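# CODE wraps a deobfuscated token script in a helper where '+' has been swapped for
# the custom infix operator |x| (my_add), which concatenates mixed int/str operands;
# uncensored1() below exec()s it with restricted builtins and reads back 'param'.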
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['solarmoviez.to']
self.base_link = 'https://solarmoviez.to'
self.search_link = '/movie/search/%s.html'
self.info_link = '/ajax/movie_info/%s.html?is_login=false'
self.server_link = '/ajax/v4_movie_episodes/%s'
self.embed_link = '/ajax/movie_embed/%s'
self.token_link = '/ajax/movie_token?eid=%s&mid=%s'
self.source_link = '/ajax/movie_sources/%s?x=%s&y=%s'
def matchAlias(self, title, aliases):
try:
for alias in aliases:
if cleantitle.get(title) == cleantitle.get(alias['title']):
return True
except:
return False
def movie(self, imdb, title, localtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': title})
url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': tvshowtitle})
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def searchShow(self, title, season, aliases, headers):
try:
title = cleantitle.normalize(title)
search = '%s Season %01d' % (title, int(season))
url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
r = client.request(url, headers=headers, timeout='15')
r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
return url
except:
return
def searchMovie(self, title, year, aliases, headers):
try:
title = cleantitle.normalize(title)
url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
r = client.request(url, headers=headers, timeout='15')
r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
try:
r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
except:
url = None
pass
if (url == None):
url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url is None: return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
aliases = eval(data['aliases'])
headers = {}
if 'tvshowtitle' in data:
episode = int(data['episode'])
url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
else:
episode = 0
url = self.searchMovie(data['title'], data['year'], aliases, headers)
mid = re.findall('-(\d+)', url)[-1]
try:
headers = {'Referer': url}
u = urlparse.urljoin(self.base_link, self.server_link % mid)
r = client.request(u, headers=headers, XHR=True)
r = json.loads(r)['html']
r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
ids = client.parseDOM(r, 'li', ret='data-id')
servers = client.parseDOM(r, 'li', ret='data-server')
labels = client.parseDOM(r, 'a', ret='title')
r = zip(ids, servers, labels)
for eid in r:
try:
try:
ep = re.findall('episode.*?(\d+).*?',eid[2].lower())[0]
except:
ep = 0
if (episode == 0) or (int(ep) == episode):
url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
script = client.request(url)
if '$_$' in script:
params = self.uncensored1(script)
elif script.startswith('[]') and script.endswith('()'):
params = self.uncensored2(script)
elif '_x=' in script:
x = re.search('''_x=['"]([^"']+)''', script).group(1)
y = re.search('''_y=['"]([^"']+)''', script).group(1)
params = {'x': x, 'y': y}
else:
raise Exception()
u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
r = client.request(u, XHR=True)
url = json.loads(r)['playlist'][0]['sources']
url = [i['file'] for i in url if 'file' in i]
url = [directstream.googletag(i) for i in url]
url = [i[0] for i in url if i]
for s in url:
sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en',
'url': s['url'], 'direct': True, 'debridonly': False})
except:
pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
if self.embed_link in url:
result = client.request(url, XHR=True)
url = json.loads(result)['embed_url']
return url
try:
for i in range(3):
u = directstream.googlepass(url)
if not u == None: break
return u
except:
return
except:
return
def uncensored(a, b):
x = '' ; i = 0
for i, y in enumerate(a):
z = b[i % len(b) - 1]
y = int(ord(str(y)[0])) + int(ord(str(z)[0]))
x += chr(y)
x = base64.b64encode(x)
return x
def uncensored1(self, script):
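# The token script appears to be JSFuck-style obfuscated JavaScript; the substitutions
# below map its building-block tokens to plain literals before evaluating it via CODE.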
try:
script = '(' + script.split("(_$$)) ('_');")[0].split("/* `$$` */")[-1].strip()
script = script.replace('(__$)[$$$]', '\'"\'')
script = script.replace('(__$)[_$]', '"\\\\"')
script = script.replace('(o^_^o)', '3')
script = script.replace('(c^_^o)', '0')
script = script.replace('(_$$)', '1')
script = script.replace('($$_)', '4')
vGlobals = {"__builtins__": None, '__name__': __name__, 'str': str, 'Exception': Exception}
vLocals = {'param': None}
exec (CODE % script.replace('+', '|x|'), vGlobals, vLocals)
data = vLocals['param'].decode('string_escape')
x = re.search('''_x=['"]([^"']+)''', data).group(1)
y = re.search('''_y=['"]([^"']+)''', data).group(1)
return {'x': x, 'y': y}
except:
pass
def uncensored2(self, script):
try:
js = jsunfuck.JSUnfuck(script).decode()
x = re.search('''_x=['"]([^"']+)''', js).group(1)
y = re.search('''_y=['"]([^"']+)''', js).group(1)
return {'x': x, 'y': y}
except:
pass
|
|
# -*- coding: utf-8 -*-
from page_objects import PageObject, PageElement, MultiPageElement
class Homepage(PageObject):
stylesheets = MultiPageElement(tag_name='link')
scripts = MultiPageElement(tag_name='script')
start_button = PageElement(name='new_game')
bank_cash = PageElement(name='cash')
app_root = PageElement(tag_name='app-root')
pool_shares_pay = PageElement(name='pool-shares-pay')
ipo_shares_pay = PageElement(name='ipo-shares-pay')
treasury_shares_pay = PageElement(name='treasury-shares-pay')
class GamePage(PageObject):
reload_game = PageElement(id_='reload')
add_player_link = PageElement(id_='add_player')
add_company_link = PageElement(id_='add_company')
display_net_worth_link = PageElement(id_='display-net-worth')
undo = PageElement(id_='undo')
redo = PageElement(id_='redo')
bank_cash = PageElement(css="#bank #cash")
bank_pool = MultiPageElement(css="#bank .pool")
pool_shares_pay = PageElement(name='pool-shares-pay')
ipo_shares_pay = PageElement(name='ipo-shares-pay')
treasury_shares_pay = PageElement(name='treasury-shares-pay')
log = MultiPageElement(css="#log div.entry")
player_name_list = MultiPageElement(css="div.player div.name")
_player_list = MultiPageElement(class_name="player")
_company_list = MultiPageElement(class_name="company")
def get_players(self):
res = []
for row in self._player_list:
res.append(Player(row, self.w))
return res
def get_companies(self):
res = []
for row in self._company_list:
res.append(Company(row, self.w))
return res
class Entity(PageObject):
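"""Wrapper around one player/company row on the game page.
The row's WebElement is kept in ``self.root``; the ``context=True`` locators below
are looked up relative to that root, and ``__getitem__`` exposes the named child
elements dict-style (``entity['name']``, ``entity['cash']``, ``entity['shares']``, ...).
"""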
_name = PageElement(css=".name", context=True)
_cash = PageElement(css=".cash", context=True)
_detail = PageElement(css=".detail", context=True)
_shares = MultiPageElement(css=".share", context=True)
_summary = PageElement(css=".row", context=True)
def __init__(self, root, *args, **kwargs):
super(Entity, self).__init__(*args, **kwargs)
self.root = root
def __getitem__(self, key):
if key == 'row':
return self.root
elif key == 'elem' or key == 'summary':
return self._summary(self.root)
elif key == 'name':
return self._name(self.root)
elif key == 'cash':
return self._cash(self.root)
elif key == 'shares':
return self._shares(self.root)
elif key == 'detail':
return self._detail(self.root)
else: # pragma: no cover
raise KeyError
class Player(Entity):
pass
class Company(Entity):
_value = PageElement(css=".value input", context=True)
_share_count = PageElement(css=".share_count", context=True)
_ipo_shares = PageElement(css=".ipo", context=True)
_bank_shares = PageElement(css=".bank", context=True)
_edit = PageElement(id_="edit", context=True)
def set_value(self, new_value):
self['value'].clear()
self['value'].send_keys(str(new_value))
def __getitem__(self, key):
if key == 'share_count':
return self._share_count(self.root)
elif key == 'ipo_shares':
return self._ipo_shares(self.root)
elif key == 'bank_shares':
return self._bank_shares(self.root)
elif key == 'value':
return self._value(self.root)
elif key == 'edit':
return self._edit(self.root)
return super(Company, self).__getitem__(key)
class AddPlayerPage(PageObject):
name = PageElement(name='name')
cash = PageElement(name='cash')
add_button = PageElement(tag_name='button')
header = PageElement(id_='title')
error_list = PageElement(css='.errorlist')
back = PageElement(id_='back')
game = PageElement(name='game')
class AddCompanyPage(PageObject):
header = PageElement(id_='title')
name = PageElement(name='name')
cash = PageElement(name='cash')
shares = PageElement(name='share_count')
add_button = PageElement(tag_name='button')
game = PageElement(name='game')
error_list = PageElement(css='.errorlist')
back = PageElement(id_='back')
text_color = MultiPageElement(name='text-color-select')
background_color = MultiPageElement(name='background-color-select')
preview = PageElement(id_='preview')
def select_text_color(self, color):
for radio in self.text_color:
if radio.get_attribute('value') == color:
radio.click()
break
def select_background_color(self, color):
for radio in self.background_color:
if radio.get_attribute('value') == color:
radio.click()
break
class TransferForm(PageObject):
amount = PageElement(name='amount')
target = MultiPageElement(name='target')
labels = MultiPageElement(css='label.transfer')
transfer_button = PageElement(name='transfer')
def select_target(self, name): # pragma: no cover
for radio in self.target:
if radio.get_attribute('id') == 'target-{}'.format(name):
radio.click()
break
else:
raise AssertionError('Could not find {} in the transfer form'.format(name))
class ShareForm(PageObject):
shares = PageElement(name='shares')
company = MultiPageElement(css='label.company-label')
source = MultiPageElement(css='label.source')
transfer_button = PageElement(name='transfer-share')
buy_share = PageElement(id_='action-buy')
sell_share = PageElement(id_='action-sell')
action = PageElement(id_='action-text')
def select_company(self, name): # pragma: no cover
for label in self.company:
if label.get_attribute('for') == 'company-{}'.format(name):
label.click()
break
else: # pragma: no cover
raise AssertionError(
'No company called {} found in share list'.format(name))
def select_source(self, name): # pragma: no cover
for label in self.source:
if label.get_attribute('for') == 'source-{}'.format(name):
label.click()
break
else:
raise AssertionError('Could not select {}'.format(name))
class OperateForm(PageObject):
revenue = PageElement(name='revenue')
full = PageElement(name='full')
half = PageElement(name='half')
withhold = PageElement(name='withhold')
class ErrorPage(PageObject):
errors = MultiPageElement(css=".error")
close = PageElement(css='.close')
class NetWorthPopup(PageObject):
popup = PageElement(id_='net-worth')
background = PageElement(css='.background')
def value(self, player, row):
field_name = 'value-{}-{}'.format(player, row)
return self.popup.find_element_by_id(field_name)
def company_row(self, company):
field_id = 'row-{}'.format(company)
return self.popup.find_element_by_id(field_id)
class EditCompanyPage(PageObject):
header = PageElement(id_='title')
name = PageElement(name='name')
shares = PageElement(name='share_count')
text_color = MultiPageElement(name='text-color-select')
background_color = MultiPageElement(name='background-color-select')
edit_button = PageElement(tag_name='button')
preview = PageElement(id_='preview')
error_list = PageElement(css='.errorlist')
back = PageElement(id_='back')
def select_text_color(self, color):
for radio in self.text_color:
if radio.get_attribute('value') == color:
radio.click()
break
def select_background_color(self, color):
for radio in self.background_color:
if radio.get_attribute('value') == color:
radio.click()
break
|
|
# =========================================================================================
# Implementation of "Show, Attend and Tell: Neural Caption Generator With Visual Attention".
# There are some notations.
# N is batch size.
# L is spatial size of feature vector (196).
# D is dimension of image feature vector (512).
# T is the number of time steps, which is equal to the caption length - 1 (16).
# V is vocabulary size (about 10000).
# M is dimension of word vector which is embedding size (default is 512).
# H is dimension of hidden state (default is 1024).
# =========================================================================================
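# For example, with the defaults a batch of features has shape (N, 196, 512) and
# the caption placeholder has shape (N, 17), i.e. T + 1 tokens including <START>.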
from __future__ import division
import tensorflow as tf
class CaptionGenerator(object):
def __init__(self, word_to_idx, dim_feature=[196, 512], dim_embed=512, dim_hidden=1024, n_time_step=16,
prev2out=True, ctx2out=True, alpha_c=0.0, selector=True, dropout=True):
"""
Args:
word_to_idx: word-to-index mapping dictionary.
dim_feature: (optional) Dimension of vggnet19 conv5_3 feature vectors.
dim_embed: (optional) Dimension of word embedding.
dim_hidden: (optional) Dimension of all hidden state.
n_time_step: (optional) Time step size of LSTM.
prev2out: (optional) previously generated word to hidden state. (see Eq (7) for explanation)
ctx2out: (optional) context to hidden state (see Eq (7) for explanation)
alpha_c: (optional) Doubly stochastic regularization coefficient. (see Section (4.2.1) for explanation)
selector: (optional) gating scalar for context vector. (see Section (4.2.1) for explanation)
dropout: (optional) If true then dropout layer is added.
"""
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in word_to_idx.iteritems()}
self.prev2out = prev2out
self.ctx2out = ctx2out
self.alpha_c = alpha_c
self.selector = selector
self.dropout = dropout
self.V = len(word_to_idx)
self.L = dim_feature[0]
self.D = dim_feature[1]
self.M = dim_embed
self.H = dim_hidden
self.T = n_time_step
self._start = word_to_idx['<START>']
self._null = word_to_idx['<NULL>']
self.weight_initializer = tf.contrib.layers.xavier_initializer()
self.const_initializer = tf.constant_initializer(0.0)
self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)
# Place holder for features and captions
self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
self.captions = tf.placeholder(tf.int32, [None, self.T + 1])
def _get_initial_lstm(self, features):
with tf.variable_scope('initial_lstm'):
features_mean = tf.reduce_mean(features, 1)
w_h = tf.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer)
b_h = tf.get_variable('b_h', [self.H], initializer=self.const_initializer)
h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h)
w_c = tf.get_variable('w_c', [self.D, self.H], initializer=self.weight_initializer)
b_c = tf.get_variable('b_c', [self.H], initializer=self.const_initializer)
c = tf.nn.tanh(tf.matmul(features_mean, w_c) + b_c)
return c, h
def _word_embedding(self, inputs, reuse=False):
with tf.variable_scope('word_embedding', reuse=reuse):
w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer)
x = tf.nn.embedding_lookup(w, inputs, name='word_vector') # (N, T, M) or (N, M)
return x
def _project_features(self, features):
with tf.variable_scope('project_features'):
w = tf.get_variable('w', [self.D, self.D], initializer=self.weight_initializer)
features_flat = tf.reshape(features, [-1, self.D])
features_proj = tf.matmul(features_flat, w)
features_proj = tf.reshape(features_proj, [-1, self.L, self.D])
return features_proj
def _attention_layer(self, features, features_proj, h, reuse=False):
with tf.variable_scope('attention_layer', reuse=reuse):
w = tf.get_variable('w', [self.H, self.D], initializer=self.weight_initializer)
b = tf.get_variable('b', [self.D], initializer=self.const_initializer)
w_att = tf.get_variable('w_att', [self.D, 1], initializer=self.weight_initializer)
h_att = tf.nn.relu(features_proj + tf.expand_dims(tf.matmul(h, w), 1) + b) # (N, L, D)
out_att = tf.reshape(tf.matmul(tf.reshape(h_att, [-1, self.D]), w_att), [-1, self.L]) # (N, L)
alpha = tf.nn.softmax(out_att)
context = tf.reduce_sum(features * tf.expand_dims(alpha, 2), 1, name='context') #(N, D)
return context, alpha
def _selector(self, context, h, reuse=False):
with tf.variable_scope('selector', reuse=reuse):
w = tf.get_variable('w', [self.H, 1], initializer=self.weight_initializer)
b = tf.get_variable('b', [1], initializer=self.const_initializer)
beta = tf.nn.sigmoid(tf.matmul(h, w) + b, 'beta') # (N, 1)
context = tf.mul(beta, context, name='selected_context')
return context, beta
def _decode_lstm(self, x, h, context, dropout=False, reuse=False):
with tf.variable_scope('logits', reuse=reuse):
w_h = tf.get_variable('w_h', [self.H, self.M], initializer=self.weight_initializer)
b_h = tf.get_variable('b_h', [self.M], initializer=self.const_initializer)
w_out = tf.get_variable('w_out', [self.M, self.V], initializer=self.weight_initializer)
b_out = tf.get_variable('b_out', [self.V], initializer=self.const_initializer)
if dropout:
h = tf.nn.dropout(h, 0.5)
h_logits = tf.matmul(h, w_h) + b_h
if self.ctx2out:
w_ctx2out = tf.get_variable('w_ctx2out', [self.D, self.M], initializer=self.weight_initializer)
h_logits += tf.matmul(context, w_ctx2out)
if self.prev2out:
h_logits += x
h_logits = tf.nn.tanh(h_logits)
if dropout:
h_logits = tf.nn.dropout(h_logits, 0.5)
out_logits = tf.matmul(h_logits, w_out) + b_out
return out_logits
def _batch_norm(self, x, mode='train', name=None):
return tf.contrib.layers.batch_norm(inputs=x,
decay=0.95,
center=True,
scale=True,
is_training=(mode=='train'),
updates_collections=None,
scope=(name+'batch_norm'))
def build_model(self):
features = self.features
captions = self.captions
batch_size = tf.shape(features)[0]
captions_in = captions[:, :self.T]
captions_out = captions[:, 1:]
mask = tf.to_float(tf.not_equal(captions_out, self._null))
# batch normalize feature vectors
features = self._batch_norm(features, mode='train', name='conv_features')
c, h = self._get_initial_lstm(features=features)
x = self._word_embedding(inputs=captions_in)
features_proj = self._project_features(features=features)
loss = 0.0
alpha_list = []
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.H)
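# Unroll the decoder for T time steps; reuse=(t != 0) shares the attention, selector,
# LSTM and output-projection variables across steps.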
for t in range(self.T):
context, alpha = self._attention_layer(features, features_proj, h, reuse=(t!=0))
alpha_list.append(alpha)
if self.selector:
context, beta = self._selector(context, h, reuse=(t!=0))
with tf.variable_scope('lstm', reuse=(t!=0)):
_, (c, h) = lstm_cell(inputs=tf.concat(1, [x[:,t,:], context]), state=[c, h])
logits = self._decode_lstm(x[:,t,:], h, context, dropout=self.dropout, reuse=(t!=0))
loss += tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, captions_out[:, t]) * mask[:, t])
if self.alpha_c > 0:
alphas = tf.transpose(tf.pack(alpha_list), (1, 0, 2)) # (N, T, L)
alphas_all = tf.reduce_sum(alphas, 1) # (N, L)
alpha_reg = self.alpha_c * tf.reduce_sum((16./196 - alphas_all) ** 2)
loss += alpha_reg
return loss / tf.to_float(batch_size)
def build_sampler(self, max_len=20):
features = self.features
# batch normalize feature vectors
features = self._batch_norm(features, mode='test', name='conv_features')
c, h = self._get_initial_lstm(features=features)
features_proj = self._project_features(features=features)
sampled_word_list = []
alpha_list = []
beta_list = []
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.H)
for t in range(max_len):
if t == 0:
x = self._word_embedding(inputs=tf.fill([tf.shape(features)[0]], self._start))
else:
x = self._word_embedding(inputs=sampled_word, reuse=True)
context, alpha = self._attention_layer(features, features_proj, h, reuse=(t!=0))
alpha_list.append(alpha)
if self.selector:
context, beta = self._selector(context, h, reuse=(t!=0))
beta_list.append(beta)
with tf.variable_scope('lstm', reuse=(t!=0)):
_, (c, h) = lstm_cell(inputs=tf.concat(1, [x, context]), state=[c, h])
logits = self._decode_lstm(x, h, context, reuse=(t!=0))
sampled_word = tf.argmax(logits, 1)
sampled_word_list.append(sampled_word)
alphas = tf.transpose(tf.pack(alpha_list), (1, 0, 2)) # (N, T, L)
betas = tf.transpose(tf.squeeze(beta_list), (1, 0)) # (N, T)
sampled_captions = tf.transpose(tf.pack(sampled_word_list), (1, 0)) # (N, max_len)
return alphas, betas, sampled_captions
|
|
"""
Module for testing the ``serpentTools`` package
"""
from functools import wraps
from unittest import TestCase, SkipTest
from logging import NOTSET
from numpy import stack
from numpy.testing import assert_allclose
from serpentTools.messages import (
DictHandler, __logger__, removeHandler, addHandler,
)
from serpentTools.utils import checkScipy
def computeMeansErrors(*arrays):
"""
Return the matrices of element-wise means and standard deviations
This function assumes that:
1. Each element in ``arrays`` is a numpy array
2. Each element in ``arrays`` is of equal dimensionality
The arrays are all stacked to create a new array with one
additional axis at index 0. The mean and standard deviation are
computed along this new axis so that the returned arrays
are of equal dimensionality to the incoming arrays
Parameters
----------
arrays: iterable
Arrays to be stacked
Returns
-------
numpy.ndarray
Element-wise mean of all incoming arrays
numpy.ndarray
Element-wise standard deviation of all incoming arrays
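Examples
--------
A minimal illustrative sketch (values chosen arbitrarily)::
    from numpy import array
    means, stds = computeMeansErrors(array([1., 2.]), array([3., 4.]))
    # means -> array([2., 3.]), stds -> array([1., 1.])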
"""
workMat = stack(arrays)
return workMat.mean(axis=0), workMat.std(axis=0)
def compareDictOfArrays(expected, actual, fmtMsg=None, rtol=0, atol=0,
testCase=None):
"""
Compare a dictionary of arrays.
Parameters
----------
expected: dict
Dictionary of expected data
actual: dict
Dictionary of actual data.
fmtMsg: str
Message to be passed as the error message. Formatted with
``.format(key=key)``, where ``key`` is the specific key
where the arrays were too different.
rtol: float
Relative tolerance for arrays
atol: float
Absolute tolerance for arrays
testCase: None or :class:`unittest.TestCase`
If given, use the ``testCase.assertSetEqual`` to compare keys
Raises
------
AssertionError:
If the keys in both dictionaries differ, or if any
one array in ``actual`` is too different from its counterpart
in ``expected``.
"""
fmtMsg = fmtMsg or "Key: {key}"
eKeys = set(expected.keys())
aKeys = set(actual.keys())
if isinstance(testCase, TestCase):
testCase.assertSetEqual(eKeys, aKeys)
else:
in1Not2 = eKeys.difference(aKeys)
in2Not1 = aKeys.difference(eKeys)
errMsg = ''
if any(in1Not2):
errMsg += ('Keys in expected not actual: {}\n'
.format(', '.join(in1Not2)))
if any(in2Not1):
errMsg += ('Keys in actual not expected: {}\n'
.format(', '.join(in2Not1)))
if errMsg:
raise AssertionError(errMsg)
for key, value in expected.items():
actualValue = actual[key]
assert_allclose(value, actualValue, rtol=rtol, atol=atol,
err_msg=fmtMsg.format(key=key))
class LoggerMixin(object):
"""
Mixin class captures log messages
Attributes
----------
handler: :class:`serpentTools.messages.DictHandler`
Logging handler that stores messages in a
:attr:`serpentTools.messages.DictHandler.logMessages`
dictionary according to level.
"""
def __init__(self):
self.__old = []
self.handler = None
def attach(self, level=NOTSET):
"""
Attach the :class:`serpentTools.messages.DictHandler`
Removes all :class:`logging.Handler` objects from the
old logger, and puts them back when :class:`detach` is
called
Parameters
----------
level: int
Initial level to apply to handler
"""
self.handler = DictHandler(level)
self.__old = __logger__.handlers
for handler in self.__old:
removeHandler(handler)
addHandler(self.handler)
def detach(self):
"""Restore the original handers to the main logger"""
if self.handler is None:
raise AttributeError("Handler not set. Possibly not attached.")
removeHandler(self.handler)
for handler in self.__old:
addHandler(handler)
self.handler = None
self.__old = []
def msgInLogs(self, level, msg, partial=False):
"""
Determine if the message is contained in the logs
Parameters
----------
level: str
Level under which this message was posted.
Must be a key in the
:attr:`~serpentTools.messages.DictHandler.logMessages`
on the :attr:`handler` for this class
msg: str
Message to be found in the logs.
partial: bool
If this evaluates to true, then search through each
``message`` in `logMessages` and return ``True`` if
``msg in message``. Otherwise, look for exact matches
Returns
-------
bool:
If the message was found in the logs
Raises
------
KeyError:
If the level was not found in the logs
AttributeError:
If the :attr:`handler` has not been created with :meth:`attach`
"""
if self.handler is None:
raise AttributeError("Handler has not been attached. Must run "
"<attach> first")
logs = self.handler.logMessages
if level not in logs:
raise KeyError("Level {} not found in logs. Existing levels:\n{}"
.format(level, list(sorted(logs.keys()))))
if not partial:
return msg in logs[level]
for message in logs[level]:
if msg in message:
return True
return False
class TestCaseWithLogCapture(TestCase, LoggerMixin):
"""
Lightly overwritten :class:`unittest.TestCase` that captures logs
Mix in the :class:`LoggerMixin` to automatically
:meth:`~LoggerMixin.attach` during
:meth:`~unittest.TestCase.setUp` and :meth:`~LoggerMixin.detach`
during :meth:`~unittest.TestCase.tearDown`
Intended to be subclassed for actual test methods
"""
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
LoggerMixin.__init__(self)
def setUp(self):
"""
Method to be called before every individual test.
Call ``attach`` to capture any log messages that would
be presented during testing.
Should be called during any subclassing.
"""
LoggerMixin.attach(self)
def tearDown(self):
"""
Method to be called immediately after calling and recording test
Call ``detach`` to reset the module logger to its original state.
Should be called during any subclassing.
"""
LoggerMixin.detach(self)
def _concatLogs(self, level):
logs = self.handler.logMessages.get(level, [])
return "\n- ".join([str(item) for item in logs])
def assertMsgInLogs(self, level, msg, partial=False):
"""
Assert that the message was stored under a given level
Combines :meth:`LoggerMixin.msgInLogs` with
:meth:`unittest.TestCase.assertTrue`
"""
matchType = "a partial" if partial else "an exact"
failMsg = "Could not find {} match for {} under {}\n{}".format(
matchType, msg, level, self._concatLogs(level))
self.assertTrue(self.msgInLogs(level, msg, partial),
msg=failMsg)
def assertMsgNotInLogs(self, level, msg, partial=False):
"""
Assert that the message was not stored under a given level
Combines :meth:`LoggerMixin.msgInLogs` with
:meth:`unittest.TestCase.assertFalse`
"""
matchType = "a partial" if partial else "an exact"
failMsg = "Found {} match for {} under {} but should not have"
self.assertFalse(self.msgInLogs(level, msg, partial),
msg=failMsg.format(matchType, msg, level))
HAS_SCIPY = checkScipy('1.0')
class MatlabTesterHelper(TestCase):
"""Helper class for matlab conversion"""
def setUp(self):
"""Call this from subclasses to skip if scipy is unavailable"""
# skip tests if scipy not installed
if not HAS_SCIPY:
raise SkipTest("scipy needed to test matlab conversion")
def plotTest(f):
"""Decorator that clears up existing plots prior to test."""
from matplotlib.pyplot import close, figure
@wraps(f)
def wrappedTest(*args, **kwargs):
close('all')
figure()
return f(*args, **kwargs)
return wrappedTest
def getLegendTexts(ax):
"""
Return all texts for items in legend.
An empty list signifies no legend or no labeled
objects.
"""
lgd = ax.get_legend()
if lgd is None:
return []
return [item.get_text() for item in lgd.get_texts()]
def plotAttrTest(testobj, ax=None, xlabel=None, ylabel=None,
xscale=None, yscale=None, legendLabels=None,
title=None):
"""Compare properties of a generated axes object"""
if ax is None:
from matplotlib.pyplot import gca
ax = gca()
if xlabel is not None:
testobj.assertEqual(xlabel, ax.get_xlabel(), msg='xlabel')
if ylabel is not None:
testobj.assertEqual(ylabel, ax.get_ylabel(), msg='ylabel')
if xscale is not None:
testobj.assertEqual(xscale, ax.get_xscale(), msg='xscale')
if yscale is not None:
testobj.assertEqual(yscale, ax.get_yscale(), msg='yscale')
if legendLabels is not None:
if isinstance(legendLabels, str):
legendLabels = [legendLabels, ]
testobj.assertEqual(legendLabels, getLegendTexts(ax),
msg='legend text')
if title is not None:
testobj.assertEqual(title, ax.get_title(), msg='title')
|
|
"""Support for HomematicIP Cloud climate devices."""
import logging
from typing import Any, Dict, List, Optional, Union
from homematicip.aio.device import AsyncHeatingThermostat, AsyncHeatingThermostatCompact
from homematicip.aio.group import AsyncHeatingGroup
from homematicip.base.enums import AbsenceType
from homematicip.device import Switch
from homematicip.functionalHomes import IndoorClimateHome
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
from .hap import HomematicipHAP
HEATING_PROFILES = {"PROFILE_1": 0, "PROFILE_2": 1, "PROFILE_3": 2}
COOLING_PROFILES = {"PROFILE_4": 3, "PROFILE_5": 4, "PROFILE_6": 5}
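# Profile names map to zero-based indices on the heating group; PROFILE_1-3 are used
# while heating, PROFILE_4-6 while cooling.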
_LOGGER = logging.getLogger(__name__)
ATTR_PRESET_END_TIME = "preset_end_time"
PERMANENT_END_TIME = "permanent"
HMIP_AUTOMATIC_CM = "AUTOMATIC"
HMIP_MANUAL_CM = "MANUAL"
HMIP_ECO_CM = "ECO"
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None
) -> None:
"""Set up the HomematicIP Cloud climate devices."""
pass
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the HomematicIP climate from a config entry."""
hap = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]]
entities = []
for device in hap.home.groups:
if isinstance(device, AsyncHeatingGroup):
entities.append(HomematicipHeatingGroup(hap, device))
if entities:
async_add_entities(entities)
class HomematicipHeatingGroup(HomematicipGenericDevice, ClimateDevice):
"""Representation of a HomematicIP heating group.
Heat mode is supported for all heating devices, including their defined profiles.
Boost is available for radiator thermostats only.
Cool mode is only available for floor-heating systems, and only if it has been enabled in the HmIP app.
"""
def __init__(self, hap: HomematicipHAP, device: AsyncHeatingGroup) -> None:
"""Initialize heating group."""
device.modelType = "HmIP-Heating-Group"
super().__init__(hap, device)
self._simple_heating = None
if device.actualTemperature is None:
self._simple_heating = self._first_radiator_thermostat
@property
def device_info(self) -> Dict[str, Any]:
"""Return device specific attributes."""
return {
"identifiers": {(HMIPC_DOMAIN, self._device.id)},
"name": self._device.label,
"manufacturer": "eQ-3",
"model": self._device.modelType,
"via_device": (HMIPC_DOMAIN, self._device.homeId),
}
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_PRESET_MODE | SUPPORT_TARGET_TEMPERATURE
@property
def target_temperature(self) -> float:
"""Return the temperature we try to reach."""
return self._device.setPointTemperature
@property
def current_temperature(self) -> float:
"""Return the current temperature."""
if self._simple_heating:
return self._simple_heating.valveActualTemperature
return self._device.actualTemperature
@property
def current_humidity(self) -> int:
"""Return the current humidity."""
return self._device.humidity
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie."""
if self._disabled_by_cooling_mode and not self._has_switch:
return HVAC_MODE_OFF
if self._device.boostMode:
return HVAC_MODE_HEAT
if self._device.controlMode == HMIP_MANUAL_CM:
return HVAC_MODE_HEAT if self._heat_mode_enabled else HVAC_MODE_COOL
return HVAC_MODE_AUTO
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
if self._disabled_by_cooling_mode and not self._has_switch:
return [HVAC_MODE_OFF]
return (
[HVAC_MODE_AUTO, HVAC_MODE_HEAT]
if self._heat_mode_enabled
else [HVAC_MODE_AUTO, HVAC_MODE_COOL]
)
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode."""
if self._device.boostMode:
return PRESET_BOOST
if self.hvac_mode in (HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF):
return PRESET_NONE
if self._device.controlMode == HMIP_ECO_CM:
if self._indoor_climate.absenceType == AbsenceType.VACATION:
return PRESET_AWAY
if self._indoor_climate.absenceType in [
AbsenceType.PARTY,
AbsenceType.PERIOD,
AbsenceType.PERMANENT,
]:
return PRESET_ECO
return (
self._device.activeProfile.name
if self._device.activeProfile.name in self._device_profile_names
else None
)
@property
def preset_modes(self) -> List[str]:
"""Return a list of available preset modes incl. hmip profiles."""
# Boost is only available if a radiator thermostat is in the room,
# and heat mode is enabled.
profile_names = self._device_profile_names
presets = []
if (
self._heat_mode_enabled and self._has_radiator_thermostat
) or self._has_switch:
if not profile_names:
presets.append(PRESET_NONE)
presets.append(PRESET_BOOST)
presets.extend(profile_names)
return presets
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return self._device.minTemperature
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return self._device.maxTemperature
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
if self.min_temp <= temperature <= self.max_temp:
await self._device.set_point_temperature(temperature)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode not in self.hvac_modes:
return
if hvac_mode == HVAC_MODE_AUTO:
await self._device.set_control_mode(HMIP_AUTOMATIC_CM)
else:
await self._device.set_control_mode(HMIP_MANUAL_CM)
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if preset_mode not in self.preset_modes:
return
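        # Leaving boost mode requires explicitly disabling it before another
        # preset can be activated.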
if self._device.boostMode and preset_mode != PRESET_BOOST:
await self._device.set_boost(False)
if preset_mode == PRESET_BOOST:
await self._device.set_boost()
if preset_mode in self._device_profile_names:
profile_idx = self._get_profile_idx_by_name(preset_mode)
if self._device.controlMode != HMIP_AUTOMATIC_CM:
await self.async_set_hvac_mode(HVAC_MODE_AUTO)
await self._device.set_active_profile(profile_idx)
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the access point."""
state_attr = super().device_state_attributes
if self._device.controlMode == HMIP_ECO_CM:
if self._indoor_climate.absenceType in [
AbsenceType.PARTY,
AbsenceType.PERIOD,
AbsenceType.VACATION,
]:
state_attr[ATTR_PRESET_END_TIME] = self._indoor_climate.absenceEndTime
elif self._indoor_climate.absenceType == AbsenceType.PERMANENT:
state_attr[ATTR_PRESET_END_TIME] = PERMANENT_END_TIME
return state_attr
@property
def _indoor_climate(self) -> IndoorClimateHome:
"""Return the hmip indoor climate functional home of this group."""
return self._home.get_functionalHome(IndoorClimateHome)
@property
    def _device_profiles(self) -> List[Any]:
"""Return the relevant profiles."""
return [
profile
for profile in self._device.profiles
if profile.visible
and profile.name != ""
and profile.index in self._relevant_profile_group
]
@property
def _device_profile_names(self) -> List[str]:
"""Return a collection of profile names."""
return [profile.name for profile in self._device_profiles]
def _get_profile_idx_by_name(self, profile_name: str) -> int:
"""Return a profile index by name."""
relevant_index = self._relevant_profile_group
index_name = [
profile.index
for profile in self._device_profiles
if profile.name == profile_name
]
return relevant_index[index_name[0]]
@property
def _heat_mode_enabled(self) -> bool:
"""Return, if heating mode is enabled."""
return not self._device.cooling
@property
def _disabled_by_cooling_mode(self) -> bool:
"""Return, if group is disabled by the cooling mode."""
return self._device.cooling and (
self._device.coolingIgnored or not self._device.coolingAllowed
)
@property
    def _relevant_profile_group(self) -> Dict[str, int]:
        """Return the profile group relevant for the current mode."""
        if self._disabled_by_cooling_mode:
            return {}
return HEATING_PROFILES if self._heat_mode_enabled else COOLING_PROFILES
@property
def _has_switch(self) -> bool:
"""Return, if a switch is in the hmip heating group."""
for device in self._device.devices:
if isinstance(device, Switch):
return True
return False
@property
def _has_radiator_thermostat(self) -> bool:
"""Return, if a radiator thermostat is in the hmip heating group."""
return bool(self._first_radiator_thermostat)
@property
def _first_radiator_thermostat(
self,
) -> Optional[Union[AsyncHeatingThermostat, AsyncHeatingThermostatCompact]]:
"""Return the first radiator thermostat from the hmip heating group."""
for device in self._device.devices:
if isinstance(
device, (AsyncHeatingThermostat, AsyncHeatingThermostatCompact)
):
return device
return None
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Workflow Logic the Identity service."""
from oslo_log import log
from keystone.common import controller
from keystone.common import dependency
from keystone.common import validation
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LW
from keystone.identity import schema
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
@dependency.requires('assignment_api', 'identity_api', 'resource_api')
class User(controller.V2Controller):
@controller.v2_deprecated
def get_user(self, request, user_id):
self.assert_admin(request)
ref = self.identity_api.get_user(user_id)
return {'user': self.v3_to_v2_user(ref)}
@controller.v2_deprecated
def get_users(self, request):
# NOTE(termie): i can't imagine that this really wants all the data
# about every single user in the system...
if 'name' in request.params:
return self.get_user_by_name(request, request.params['name'])
self.assert_admin(request)
user_list = self.identity_api.list_users(
CONF.identity.default_domain_id)
return {'users': self.v3_to_v2_user(user_list)}
@controller.v2_deprecated
def get_user_by_name(self, request, user_name):
self.assert_admin(request)
ref = self.identity_api.get_user_by_name(
user_name, CONF.identity.default_domain_id)
return {'user': self.v3_to_v2_user(ref)}
# CRUD extension
@controller.v2_deprecated
def create_user(self, request, user):
validation.lazy_validate(schema.user_create_v2, user)
user = self._normalize_OSKSADM_password_on_request(user)
user = self.normalize_username_in_request(user)
user = self._normalize_dict(user)
self.assert_admin(request)
default_project_id = user.pop('tenantId', None)
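        # The v2 API passes the default project as 'tenantId'; store it under
        # the v3 attribute name 'default_project_id' once the project has been
        # validated below.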
if default_project_id is not None:
# Check to see if the project is valid before moving on.
self.resource_api.get_project(default_project_id)
user['default_project_id'] = default_project_id
self.resource_api.ensure_default_domain_exists()
# The manager layer will generate the unique ID for users
user_ref = self._normalize_domain_id(request, user.copy())
new_user_ref = self.v3_to_v2_user(
self.identity_api.create_user(
user_ref, initiator=request.audit_initiator
)
)
if default_project_id is not None:
self.assignment_api.add_user_to_project(default_project_id,
new_user_ref['id'])
return {'user': new_user_ref}
@controller.v2_deprecated
def update_user(self, request, user_id, user):
# NOTE(termie): this is really more of a patch than a put
validation.lazy_validate(schema.user_update_v2, user)
user = self.normalize_username_in_request(user)
self.assert_admin(request)
default_project_id = user.pop('tenantId', None)
if default_project_id is not None:
user['default_project_id'] = default_project_id
old_user_ref = self.v3_to_v2_user(
self.identity_api.get_user(user_id))
# Check whether a tenant is being added or changed for the user.
# Catch the case where the tenant is being changed for a user and also
# where a user previously had no tenant but a tenant is now being
# added for the user.
if (('tenantId' in old_user_ref and
old_user_ref['tenantId'] != default_project_id and
default_project_id is not None) or
('tenantId' not in old_user_ref and
default_project_id is not None)):
# Make sure the new project actually exists before we perform the
# user update.
self.resource_api.get_project(default_project_id)
user_ref = self.identity_api.update_user(
user_id, user, initiator=request.audit_initiator
)
user_ref = self.v3_to_v2_user(user_ref)
# If 'tenantId' is in either ref, we might need to add or remove the
# user from a project.
if 'tenantId' in user_ref or 'tenantId' in old_user_ref:
if user_ref['tenantId'] != old_user_ref.get('tenantId'):
if old_user_ref.get('tenantId'):
try:
member_role_id = CONF.member_role_id
self.assignment_api.remove_role_from_user_and_project(
user_id, old_user_ref['tenantId'], member_role_id)
except exception.NotFound:
# NOTE(morganfainberg): This is not a critical error it
# just means that the user cannot be removed from the
# old tenant. This could occur if roles aren't found
# or if the project is invalid or if there are no roles
# for the user on that project.
msg = _LW('Unable to remove user %(user)s from '
'%(tenant)s.')
LOG.warning(msg, {'user': user_id,
'tenant': old_user_ref['tenantId']})
if user_ref['tenantId']:
try:
self.assignment_api.add_user_to_project(
user_ref['tenantId'], user_id)
except exception.Conflict: # nosec
# We are already a member of that tenant
pass
except exception.NotFound:
# NOTE(morganfainberg): Log this and move on. This is
# not the end of the world if we can't add the user to
# the appropriate tenant. Most of the time this means
                        # that the project is invalid or roles are somehow
# incorrect. This shouldn't prevent the return of the
# new ref.
msg = _LW('Unable to add user %(user)s to %(tenant)s.')
LOG.warning(msg, {'user': user_id,
'tenant': user_ref['tenantId']})
return {'user': user_ref}
@controller.v2_deprecated
def delete_user(self, request, user_id):
self.assert_admin(request)
self.identity_api.delete_user(
user_id, initiator=request.audit_initiator
)
@controller.v2_deprecated
def set_user_enabled(self, request, user_id, user):
validation.lazy_validate(schema.enable_user_v2, user)
return self.update_user(request, user_id, user)
@controller.v2_deprecated
def set_user_password(self, request, user_id, user):
user = self._normalize_OSKSADM_password_on_request(user)
return self.update_user(request, user_id, user)
@staticmethod
def _normalize_OSKSADM_password_on_request(ref):
"""Set the password from the OS-KSADM Admin Extension.
The OS-KSADM Admin Extension documentation says that
`OS-KSADM:password` can be used in place of `password`.
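        For example (illustrative): a request body of
        {'OS-KSADM:password': 'secret'} is rewritten to {'password': 'secret'}.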
"""
if 'OS-KSADM:password' in ref:
ref['password'] = ref.pop('OS-KSADM:password')
return ref
@dependency.requires('identity_api')
class UserV3(controller.V3Controller):
collection_name = 'users'
member_name = 'user'
def __init__(self):
super(UserV3, self).__init__()
self.get_member_from_driver = self.identity_api.get_user
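    # The _check_* helpers below are passed as ``callback`` arguments to the
    # @controller.protected/@controller.filterprotected decorators so that the
    # referenced user/group is loaded and included in the protection check.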
def _check_user_and_group_protection(self, request, prep_info,
user_id, group_id):
ref = {}
ref['user'] = self.identity_api.get_user(user_id)
ref['group'] = self.identity_api.get_group(group_id)
self.check_protection(request, prep_info, ref)
def _check_group_protection(self, request, prep_info, group_id):
ref = {}
ref['group'] = self.identity_api.get_group(group_id)
self.check_protection(request, prep_info, ref)
@controller.protected()
def create_user(self, request, user):
validation.lazy_validate(schema.user_create, user)
# The manager layer will generate the unique ID for users
ref = self._normalize_dict(user)
ref = self._normalize_domain_id(request, ref)
ref = self.identity_api.create_user(
ref, initiator=request.audit_initiator
)
return UserV3.wrap_member(request.context_dict, ref)
@controller.filterprotected('domain_id', 'enabled', 'idp_id', 'name',
'protocol_id', 'unique_id',
'password_expires_at')
def list_users(self, request, filters):
hints = UserV3.build_driver_hints(request, filters)
domain = self._get_domain_id_for_list_request(request)
refs = self.identity_api.list_users(domain_scope=domain, hints=hints)
return UserV3.wrap_collection(request.context_dict, refs, hints=hints)
@controller.filterprotected('domain_id', 'enabled', 'name',
'password_expires_at',
callback=_check_group_protection)
def list_users_in_group(self, request, filters, group_id):
hints = UserV3.build_driver_hints(request, filters)
refs = self.identity_api.list_users_in_group(group_id, hints=hints)
return UserV3.wrap_collection(request.context_dict, refs, hints=hints)
@controller.protected()
def get_user(self, request, user_id):
ref = self.identity_api.get_user(user_id)
return UserV3.wrap_member(request.context_dict, ref)
def _update_user(self, request, user_id, user):
self._require_matching_id(user_id, user)
ref = self.identity_api.update_user(
user_id, user, initiator=request.audit_initiator
)
return UserV3.wrap_member(request.context_dict, ref)
@controller.protected()
def update_user(self, request, user_id, user):
validation.lazy_validate(schema.user_update, user)
return self._update_user(request, user_id, user)
@controller.protected(callback=_check_user_and_group_protection)
def add_user_to_group(self, request, user_id, group_id):
self.identity_api.add_user_to_group(
user_id, group_id, initiator=request.audit_initiator
)
@controller.protected(callback=_check_user_and_group_protection)
def check_user_in_group(self, request, user_id, group_id):
return self.identity_api.check_user_in_group(user_id, group_id)
@controller.protected(callback=_check_user_and_group_protection)
def remove_user_from_group(self, request, user_id, group_id):
self.identity_api.remove_user_from_group(
user_id, group_id, initiator=request.audit_initiator
)
@controller.protected()
def delete_user(self, request, user_id):
return self.identity_api.delete_user(
user_id, initiator=request.audit_initiator
)
# NOTE(gagehugo): We do not need this to be @protected.
# A user is already expected to know their password in order
# to change it, and can be authenticated as such.
def change_password(self, request, user_id, user):
original_password = user.get('original_password')
if original_password is None:
raise exception.ValidationError(target='user',
attribute='original_password')
password = user.get('password')
if password is None:
raise exception.ValidationError(target='user',
attribute='password')
try:
self.identity_api.change_password(
request, user_id, original_password,
password, initiator=request.audit_initiator)
except AssertionError as e:
raise exception.Unauthorized(_(
'Error when changing user password: %s') % e)
@dependency.requires('identity_api')
class GroupV3(controller.V3Controller):
collection_name = 'groups'
member_name = 'group'
def __init__(self):
super(GroupV3, self).__init__()
self.get_member_from_driver = self.identity_api.get_group
def _check_user_protection(self, request, prep_info, user_id):
ref = {}
ref['user'] = self.identity_api.get_user(user_id)
self.check_protection(request, prep_info, ref)
@controller.protected()
def create_group(self, request, group):
validation.lazy_validate(schema.group_create, group)
# The manager layer will generate the unique ID for groups
ref = self._normalize_dict(group)
ref = self._normalize_domain_id(request, ref)
ref = self.identity_api.create_group(
ref, initiator=request.audit_initiator
)
return GroupV3.wrap_member(request.context_dict, ref)
@controller.filterprotected('domain_id', 'name')
def list_groups(self, request, filters):
hints = GroupV3.build_driver_hints(request, filters)
domain = self._get_domain_id_for_list_request(request)
refs = self.identity_api.list_groups(domain_scope=domain, hints=hints)
return GroupV3.wrap_collection(request.context_dict, refs, hints=hints)
@controller.filterprotected('name', callback=_check_user_protection)
def list_groups_for_user(self, request, filters, user_id):
hints = GroupV3.build_driver_hints(request, filters)
refs = self.identity_api.list_groups_for_user(user_id, hints=hints)
return GroupV3.wrap_collection(request.context_dict, refs, hints=hints)
@controller.protected()
def get_group(self, request, group_id):
ref = self.identity_api.get_group(group_id)
return GroupV3.wrap_member(request.context_dict, ref)
@controller.protected()
def update_group(self, request, group_id, group):
validation.lazy_validate(schema.group_update, group)
self._require_matching_id(group_id, group)
ref = self.identity_api.update_group(
group_id, group, initiator=request.audit_initiator
)
return GroupV3.wrap_member(request.context_dict, ref)
@controller.protected()
def delete_group(self, request, group_id):
self.identity_api.delete_group(
group_id, initiator=request.audit_initiator
)