# (C) Copyright 2014-2016 Hewlett Packard Enterprise Development LP
# Copyright 2017 Fujitsu LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the AlarmProcessor"""
import collections
import json
import time
from unittest import mock
from monasca_common.kafka import legacy_kafka_message
from monasca_notification import notification as m_notification
from monasca_notification.processors import alarm_processor
from tests import base
alarm_tuple = collections.namedtuple('alarm_tuple', ['offset', 'message'])
message_tuple = collections.namedtuple('message_tuple', ['key', 'value'])
class TestAlarmProcessor(base.BaseTestCase):
def setUp(self):
super(TestAlarmProcessor, self).setUp()
self.trap = []
def _create_raw_alarm(self, partition, offset, message, key=1):
"""Create a raw alarm, with the given message dictionary.
"""
json_msg = json.dumps({'alarm-transitioned': message})
msg_tuple = message_tuple(key, json_msg)
return legacy_kafka_message.LegacyKafkaMessage([partition,
alarm_tuple(offset,
msg_tuple)])
@mock.patch('pymysql.connect')
@mock.patch('monasca_notification.processors.alarm_processor.log')
def _run_alarm_processor(self, alarm, sql_response, mock_log, mock_mysql):
"""Runs a mocked alarm processor reading from queue while running,
returns (queue_message, log_message)
"""
# The log runs in another thread, so rather than inspecting it directly,
# redirect its methods to append to the self.trap list.
mock_log.warn = self.trap.append
mock_log.error = self.trap.append
mock_log.exception = self.trap.append
# Setup the sql response
if sql_response is not None:
mock_mysql.return_value = mock_mysql
mock_mysql.cursor.return_value = mock_mysql
mock_mysql.__iter__.return_value = sql_response
self.conf_override(group='mysql', ssl=None,
host='localhost', port='3306',
user='mysql_user', db='dbname',
passwd='mysql_passwd')
self.conf_override(group='statsd', host='localhost',
port=8125)
processor = alarm_processor.AlarmProcessor()
return processor.to_notification(alarm)
def test_invalid_alarm(self):
"""Invalid Alarms, should log and error and push to the finished queue."""
alarm = self._create_raw_alarm(0, 1, {'invalid': 'invalid_alarm'})
notifications, partition, offset = self._run_alarm_processor(alarm, None)
self.assertEqual(notifications, [])
self.assertEqual(partition, 0)
self.assertEqual(offset, 1)
invalid_msg = ('Invalid Alarm format skipping partition 0, offset 1\n'
'ErrorAlarm data missing field actionsEnabled')
self.assertIn(invalid_msg, self.trap)
def test_old_timestamp(self):
"""Should cause the alarm_ttl to fire log a warning and push to finished queue."""
timestamp = 1375346830042
alarm_dict = {
"tenantId": "0",
"alarmDefinitionId": "0",
"alarmId": "1",
"alarmName": "test Alarm",
"oldState": "OK",
"newState": "ALARM",
"stateChangeReason": "I am alarming!",
"timestamp": timestamp,
"actionsEnabled": 1,
"metrics": "cpu_util",
"severity": "LOW",
"link": "http://some-place.com",
"lifecycleState": "OPEN"}
alarm = self._create_raw_alarm(0, 2, alarm_dict)
expected_datetime = time.ctime(timestamp / 1000)
notifications, partition, offset = self._run_alarm_processor(alarm, None)
self.assertEqual(notifications, [])
self.assertEqual(partition, 0)
self.assertEqual(offset, 2)
old_msg = ('Received alarm older than the ttl, skipping. '
'Alarm from {datetime}'.format(datetime=expected_datetime))
self.assertIn(old_msg, self.trap)
def test_no_notifications(self):
"""Test an alarm with no defined notifications
"""
alarm_dict = {
"tenantId": "0",
"alarmDefinitionId": "0",
"alarmId": "1",
"alarmName": "test Alarm",
"oldState": "OK",
"newState": "ALARM",
"stateChangeReason": "I am alarming!",
"timestamp": time.time() * 1000,
"actionsEnabled": 1,
"metrics": "cpu_util",
"severity": "LOW",
"link": "http://some-place.com",
"lifecycleState": "OPEN"}
alarm = self._create_raw_alarm(0, 3, alarm_dict)
notifications, partition, offset = self._run_alarm_processor(alarm, None)
self.assertEqual(notifications, [])
self.assertEqual(partition, 0)
self.assertEqual(offset, 3)
def test_valid_notification(self):
"""Test a valid notification, being put onto the notification_queue
"""
alarm_dict = {
"tenantId": "0",
"alarmDefinitionId": "0",
"alarmId": "1",
"alarmName": "test Alarm",
"oldState": "OK",
"newState": "ALARM",
"stateChangeReason": "I am alarming!",
"timestamp": time.time() * 1000,
"actionsEnabled": 1,
"metrics": "cpu_util",
"severity": "LOW",
"link": "http://some-place.com",
"lifecycleState": "OPEN"}
alarm = self._create_raw_alarm(0, 4, alarm_dict)
sql_response = [[1, 'EMAIL', 'test notification', 'me@here.com', 0]]
notifications, partition, offset = self._run_alarm_processor(alarm, sql_response)
test_notification = m_notification.Notification(1, 'email', 'test notification',
'me@here.com', 0, 0, alarm_dict)
self.assertEqual(notifications, [test_notification])
self.assertEqual(partition, 0)
self.assertEqual(offset, 4)
def test_two_valid_notifications(self):
alarm_dict = {
"tenantId": "0",
"alarmDefinitionId": "0",
"alarmId": "1",
"alarmName": "test Alarm",
"oldState": "OK",
"newState": "ALARM",
"stateChangeReason": "I am alarming!",
"timestamp": time.time() * 1000,
"actionsEnabled": 1,
"metrics": "cpu_util",
"severity": "LOW",
"link": "http://some-place.com",
"lifecycleState": "OPEN"}
alarm = self._create_raw_alarm(0, 5, alarm_dict)
sql_response = [[1, 'EMAIL', 'test notification', 'me@here.com', 0],
[2, 'EMAIL', 'test notification2', 'me@here.com', 0]]
notifications, partition, offset = self._run_alarm_processor(alarm, sql_response)
test_notification = m_notification.Notification(1, 'email', 'test notification',
'me@here.com', 0, 0, alarm_dict)
test_notification2 = m_notification.Notification(2, 'email', 'test notification2',
'me@here.com', 0, 0, alarm_dict)
self.assertEqual(notifications, [test_notification, test_notification2])
self.assertEqual(partition, 0)
self.assertEqual(offset, 5)
|
|
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library functions for verification of neural networks using functional lagrange multipliers."""
import abc
import dataclasses
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import jax
from jax import lax
import jax.numpy as jnp
from jax_verify.extensions.functional_lagrangian import lagrangian_form as lag_form
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.sdp_verify import sdp_verify
from jax_verify.extensions.sdp_verify import utils as sdp_utils
from jax_verify.src import bound_propagation
from jax_verify.src import graph_traversal
from jax_verify.src import synthetic_primitives
import numpy as np
import optax
Params = verify_utils.Params
ParamsTypes = verify_utils.ParamsTypes
InnerVerifInstance = verify_utils.InnerVerifInstance
LagrangianForm = lag_form.LagrangianForm
Nest = bound_propagation.Nest
class DualOp(bound_propagation.Bound):
"""Lagrangian dual contribution."""
def __init__(
self,
name,
base_bound: bound_propagation.Bound,
affine_fn: Callable[[jnp.array], jnp.array],
inputs: Optional[Sequence[Union['DualOp', jnp.array]]] = None,
relu_preact_name: Optional[int] = None):
self.name = name
self._base_bound = base_bound
self._affine_fn = affine_fn
self._inputs = inputs
self._relu_preact_name = relu_preact_name
@property
def base_bound(self) -> bound_propagation.Bound:
return self._base_bound
@property
def lower(self) -> jnp.array:
return self._base_bound.lower
@property
def upper(self) -> jnp.array:
return self._base_bound.upper
@property
def shape(self) -> Sequence[int]:
return self._base_bound.lower.shape
def affine(self, act_or_input):
return self._affine_fn(act_or_input)
@property
def is_input(self) -> bool:
return self._inputs is None
@property
def is_relu(self) -> bool:
return self._relu_preact_name is not None
@property
def relu_preact_name(self) -> int:
if self._relu_preact_name is None:
raise ValueError('Not an activation.')
return self._relu_preact_name
@property
def inputs(self) -> Sequence[Union['DualOp', jnp.array]]:
if self._inputs is None:
raise ValueError('Input node does not have inputs')
return self._inputs
_affine_primitives_list = (
bound_propagation.AFFINE_PRIMITIVES +
bound_propagation.RESHAPE_PRIMITIVES +
[lax.div_p]
)
class _LagrangianTransform(bound_propagation.GraphTransform[DualOp]):
"""Identifies graph nodes having Lagrangian dual contributions."""
def __init__(self, boundprop_transform: bound_propagation.BoundTransform):
"""Defines propagation of Lagrangian dual contributions.
Args:
boundprop_transform: Basic Jax primitive ops' equivalents for
the underlying bound propagation method.
"""
self._boundprop_transform = boundprop_transform
def input_transform(self, context, lower_bound, upper_bound):
in_bounds = self._boundprop_transform.input_transform(
context, lower_bound, upper_bound)
return DualOp(context.index, in_bounds, lambda x: x, inputs=None)
def primitive_transform(self, context, primitive, *args, **params):
interval_args = [arg.base_bound if isinstance(arg, DualOp) else arg
for arg in args]
out_bounds = self._boundprop_transform.equation_transform(
context, primitive, *interval_args, **params)
if primitive in _affine_primitives_list:
if (primitive in bound_propagation.BILINEAR_PRIMITIVES and
isinstance(args[0], DualOp) and isinstance(args[1], DualOp)):
raise NotImplementedError(
'Multiplication with two non-constant inputs is not supported')
elif primitive == lax.div_p and isinstance(args[1], DualOp):
raise NotImplementedError(
f'Division with non-constant divisor {args[1]} is not supported')
# Compose this affine primitive with the inputs' own affine functions
# in terms of the previous ReLU activation (or original network input).
def affine_fn(act_or_input):
return primitive.bind(*[
arg.affine(act_or_input) if isinstance(arg, DualOp) else arg
for arg in args], **params)
return DualOp(context.index, out_bounds, affine_fn, inputs=args)
elif primitive == synthetic_primitives.relu_p:
return DualOp(
context.index, out_bounds, lambda x: x, inputs=args,
relu_preact_name=args[0].name)
else:
raise NotImplementedError(f'Unsupported primitive: {primitive}')
class InnerMaxStrategy(metaclass=abc.ABCMeta):
"""Solve inner maximisations."""
jittable = True
@abc.abstractmethod
def solve_max(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.array,
step: int,
) -> jnp.array:
"""Solve maximization problem of opt_instance.
Args:
inner_dual_vars: Dual variables for the inner maximisation.
opt_instance: Verification instance that defines optimization problem to
be solved.
key: Jax PRNG key.
step: outer optimization iteration number.
Returns:
max_value: final value of the objective function found.
"""
def supports_stochastic_parameters(self):
return False
def build_spec(self, opt_instance: InnerVerifInstance, step: int,
softmax: bool = False):
"""Build objective function for the maximization problem."""
# affine_fns are assumed to be non-batched in both inputs and outputs
affine_fns = opt_instance.affine_fns
lag_form_pre = opt_instance.lagrangian_form_pre
lag_form_post = opt_instance.lagrangian_form_post
def forward_relu_before_affine(x):
# we use relu before affine ordering
# -> first op is relu unless this is the first layer
if not opt_instance.is_first:
x = jax.nn.relu(x)
# forward through intermediate layers of opt_instance
for affine_fn in affine_fns[:-1]:
x = affine_fn(x)
x = jax.nn.relu(x)
# forward through last layer of opt_instance
x = affine_fns[-1](x)
return x
def forward_affine_before_relu(x):
# forward through intermediate layers of opt_instance
for affine_fn in affine_fns[:-1]:
x = affine_fn(x)
x = jax.nn.relu(x)
# forward through last layer of opt_instance, which contains activations
# unless it is the last layer of the network
x = affine_fns[-1](x)
if not opt_instance.is_last:
x = jax.nn.relu(x)
return x
forward = (
forward_affine_before_relu if opt_instance.affine_before_relu
else forward_relu_before_affine)
def obj_first(x, duals_pre, duals_post):
del duals_pre # unused
return lag_form_post.apply(forward(x), duals_post, step)
def obj_intermediate(x, duals_pre, duals_post):
return (lag_form_post.apply(forward(x), duals_post, step)
- lag_form_pre.apply(x, duals_pre, step))
def obj_last(x, duals_pre, duals_post):
del duals_post # unused
if softmax:
y = jax.nn.softmax(x)
else:
y = x
return forward(y) - lag_form_pre.apply(x, duals_pre, step)
if opt_instance.is_first:
return obj_first
elif opt_instance.is_last:
return obj_last
else:
return obj_intermediate
def init_duals(
self,
boundprop_transform: bound_propagation.BoundTransform,
spec_type: verify_utils.SpecType,
affine_before_relu: bool,
spec_fn: Callable[..., jnp.array],
key: jnp.array,
lagrangian_form_per_layer: Iterable[LagrangianForm],
*input_bounds: Nest[graph_traversal.GraphInput],
) -> Tuple[Dict[int, DualOp], Params, ParamsTypes]:
"""Initialize the dual parameters and their types (Inequality vs Equality).
Args:
boundprop_transform: Underlying bound propagation method.
spec_type: Type of specification, e.g. adversarial robustness or uncertainty.
affine_before_relu: whether layer ordering uses the affine layer before
the ReLU.
spec_fn: Specification function to bound above.
key: PRNGKey used while initializing trainable params.
lagrangian_form_per_layer: Sequence of LagrangianForm
instances whose 'init_params' function initialises the parameters of
the layer's functional Lagrangian.
*input_bounds: Interval bounds on the inputs of `spec_fn`.
Returns:
env: Lagrangian computations for each contributing graph node.
dual_params: lagrangian parameters as 'outer', dummy params as 'inner'.
dual_params_types: constraint types (inequality vs equality) for
'outer' and 'inner', governing whether to project.
"""
# Analyse the graph, propagating (or applying) bounds along the way.
_, env = bound_propagation.bound_propagation(
bound_propagation.ForwardPropagationAlgorithm(
_LagrangianTransform(boundprop_transform)),
spec_fn, *input_bounds)
env = {
op.name: op for op in env.values()
if isinstance(op, DualOp)}
make_equality_constraint = lambda s: sdp_utils.DualVarTypes.EQUALITY
# initialize outer variables and types
lagrangian_form = {}
lagrange_params = {}
lagrangian_form_iter = iter(lagrangian_form_per_layer)
for name, op in env.items():
if op.is_relu:
lagrangian_form[name] = next(lagrangian_form_iter)
key, layer_key = jax.random.split(key, 2)
lagrange_params[name] = lagrangian_form[name].init_params(
layer_key, op.shape[1:])
elif op.is_input or op.name == max(env):
# special case for first and last layers
lagrangian_form[name] = None
lagrange_params[name] = None
lagrange_params_types = jax.tree_map(
make_equality_constraint, lagrange_params)
inner_problems = _enumerate_inner_max_problems(
env, lagrangian_form, lagrange_params, spec_type, affine_before_relu)
# Initialize inner variables and types
inner_params = []
inner_params_types = []
for inner_problem in inner_problems:
layer_inner_params, layer_inner_params_types = (
self.init_layer_inner_params(inner_problem))
inner_params.append(layer_inner_params)
inner_params_types.append(layer_inner_params_types)
dual_params = Params(inner=inner_params, outer=lagrange_params)
dual_params_types = ParamsTypes(
inner=inner_params_types, outer=lagrange_params_types,
lagrangian_form=lagrangian_form)
return env, dual_params, dual_params_types
@abc.abstractmethod
def init_layer_inner_params(
self, opt_instance: verify_utils.InnerVerifInstance) -> Tuple[Any, Any]:
"""Initialises duals and their types for a single inner maximisation.
Args:
opt_instance: The context (nearby bounds and outer duals) for the
layer's inner maximisation problem.
Returns:
inner_params: parameters for the 'inner' optimisation.
inner_params_types: constraint types (inequality vs equality) for
the 'inner' optimisation, governing whether to project.
"""
def project_dual(dual_params: Params,
dual_params_types: ParamsTypes) -> Params:
"""Project the dual variables."""
projected_inner_vars = sdp_verify.project_duals(dual_params.inner,
dual_params_types.inner)
projected_outer_vars = sdp_verify.project_duals(dual_params.outer,
dual_params_types.outer)
new_dual_params = dual_params._replace(
inner=projected_inner_vars, outer=projected_outer_vars)
return new_dual_params
def build_dual_fun(
env: Dict[int, DualOp],
lagrangian_form: Dict[int, LagrangianForm],
inner_opt: InnerMaxStrategy,
affine_before_relu: bool,
spec_type: verify_utils.SpecType,
merge_problems: Optional[Dict[int, int]] = None,
) -> Callable[[Params, jnp.array, int], jnp.array]:
"""Build the dual function that takes as input the inner/outer lagrangian parameters.
Args:
env: Lagrangian computations for each contributing graph node.
lagrangian_form: Dictionary, keyed by layer index, of LagrangianForm
instances whose 'apply' function accepts hidden-layer activations and
the parameters for the functional lagrange multiplier, and returns a scalar
value.
inner_opt: Inner optimisation strategy.
affine_before_relu: whether layer ordering uses the affine layer before
the ReLU.
spec_type: Specification type (adversarial or uncertainty).
merge_problems: the key of the dictionary corresponds to the index of the
layer to begin the merge, and the associated value corresponds to the
number of consecutive layers to be merged with it.
For example, `{0: 2, 2: 3}` will merge together layer 0 and 1,
as well as layers 2, 3 and 4.
Returns:
A function that is a (possibly proxy) upper bound on the verification
objective, and takes as input the inner and outer dual variables, and the
PRNG key.
"""
def dual_loss_fun(dual_params: Params,
key: jnp.array, step: int) -> jnp.array:
lagrange_params = dual_params.outer
inner_vars_list = dual_params.inner
inner_problems = _enumerate_inner_max_problems(
env, lagrangian_form, lagrange_params, spec_type, affine_before_relu)
if merge_problems:
inner_problems = _merge_specified_instances(
inner_problems, merge_problems)
# accumulate loss over inner optimization problems
loss = 0.0
stats = {}
for inner_problem, inner_vars in zip(inner_problems, inner_vars_list):
key, inner_key = jax.random.split(key, 2)
loss_inner_problem = inner_opt.solve_max(
inner_vars, inner_problem, key=inner_key, step=step)
assert loss_inner_problem.ndim == 1
# assuming batch_size of 1 for now
loss_inner_problem = jnp.reshape(loss_inner_problem, ())
stats[f'loss_problem_{inner_problem.idx}'] = loss_inner_problem
loss += loss_inner_problem
stats['loss'] = loss
return loss, stats
return dual_loss_fun
def _enumerate_inner_max_problems(
env: Dict[int, DualOp],
lagrangian_form: Dict[int, LagrangianForm],
lagrange_params: Dict[int, Any],
spec_type: verify_utils.SpecType,
affine_before_relu: bool,
) -> List[InnerVerifInstance]:
"""Enumerates the inner maximisation problems."""
# iteratively create inner problems: each inner problem links the
# output of a layer to the next
inner_problems = []
idx = 0
for op in env.values():
is_last = op.name == max(env)
if op.is_relu or is_last:
preact_op = env[op.relu_preact_name] if op.is_relu else op
# Search for the previous ReLU.
prev_op = preact_op
while not (prev_op.is_input or prev_op.is_relu):
input_ops = [io for io in prev_op.inputs if isinstance(io, DualOp)]
if len(input_ops) != 1:
raise NotImplementedError('Multi-input ops not currently supported.')
prev_op = input_ops[0]
prev_preact_op = prev_op.inputs[0] if prev_op.is_relu else None
# Lagrange parameters for the equality constraint just before the layer
lagrange_params_pre = lagrange_params[prev_op.name]
# Lagrange parameters for the equality constraint just after the layer
lagrange_params_post = lagrange_params[op.name]
# corresponding constraints (obtained via e.g. bound propagation)
bounds_pre = sdp_utils.IntBound(
lb_pre=(prev_preact_op.lower if prev_preact_op is not None
else prev_op.lower),
ub_pre=(prev_preact_op.upper if prev_preact_op is not None
else prev_op.upper),
lb=prev_op.lower, ub=prev_op.upper)
bounds_post = sdp_utils.IntBound(
lb_pre=None, ub_pre=None, # not needed
lb=op.lower, ub=op.upper)
lagrangian_form_pre = lagrangian_form[prev_op.name]
lagrangian_form_post = lagrangian_form[op.name]
# create inner optimization problem
opt_instance = verify_utils.InnerVerifInstance(
affine_fns=[preact_op.affine],
bounds=[bounds_pre, bounds_post],
is_first=(lagrange_params_pre is None), is_last=is_last,
lagrangian_form_pre=lagrangian_form_pre,
lagrangian_form_post=lagrangian_form_post,
lagrange_params_post=lagrange_params_post,
lagrange_params_pre=lagrange_params_pre,
idx=idx,
spec_type=spec_type, affine_before_relu=affine_before_relu)
# if not last layer, lagrange_params_post cannot be None
assert(opt_instance.is_last or
opt_instance.lagrange_params_post is not None)
inner_problems.append(opt_instance)
idx += 1
if spec_type == verify_utils.SpecType.UNCERTAINTY:
# For the uncertainty spec, the second-to-last problem handles the logits
# layer; marking it is_last treats that layer without a trailing relu when
# the affine_before_relu flag is true.
inner_problems[-2] = dataclasses.replace(inner_problems[-2], is_last=True)
return inner_problems
def _merge_specified_instances(
instances: Sequence[InnerVerifInstance],
merge_specification: Dict[int, int],
) -> Sequence[InnerVerifInstance]:
"""Merge instances according to the specified list of groups to merge."""
merged_instances = []
idx = 0
merge_specification = merge_specification.copy()
while idx < len(instances):
run_length = merge_specification.pop(idx, 1) # default to single
instances_to_merge = instances[idx:(idx+run_length)]
merged_instances.append(_merge_instances(*instances_to_merge))
idx += run_length
if idx > len(instances):
raise ValueError(
f'Invalid specification (index {idx} out of {len(instances)}).')
if merge_specification:
raise ValueError(
f'Unused entry in merge_specification: {merge_specification}.')
return merged_instances
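# For example, with merge_specification == {0: 2, 2: 3} and five instances,
# the result is [merge(instances[0:2]), merge(instances[2:5])]; any index not
# named in the specification is kept as a singleton group.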
def _merge_instances(
instance_first: InnerVerifInstance,
*instances_rest: InnerVerifInstance,
) -> InnerVerifInstance:
"""Merge InnerVerifInstances together."""
if not instances_rest:
return instance_first
else:
instance_second, *instances_rest = instances_rest
if (instance_first.lagrangian_form_post
is not instance_second.lagrangian_form_pre):
raise ValueError(
'Cannot merge InnerVerifInstances with different Lagrangian forms.')
merged_instance = dataclasses.replace(
instance_first,
affine_fns=(instance_first.affine_fns + instance_second.affine_fns),
bounds=(instance_first.bounds[:-1] + instance_second.bounds),
is_first=instance_first.is_first,
is_last=instance_second.is_last,
# the solver corresponding to the first idx is used if using a mixed strategy
)
return _merge_instances(merged_instance, *instances_rest)
def make_opt_and_num_steps(opt_config):
"""Get optax optimizer, and number of steps to run training for."""
if opt_config.anneal_lengths:
print('Using custom annealing schedule', opt_config.anneal_lengths)
steps_per_anneal = [int(x) for x in opt_config.anneal_lengths.split(',')]
assert len(steps_per_anneal) > 1, 'for no anneals, do not use this flag'
num_steps = sum(steps_per_anneal)
steps_per_anneal = steps_per_anneal[:-1]
num_anneals = len(steps_per_anneal)
anneal_steps = np.cumsum(steps_per_anneal)
else:
num_anneals = opt_config.num_anneals
num_steps = opt_config.steps_per_anneal * (1 + opt_config.num_anneals)
anneal_steps = [
opt_config.steps_per_anneal *
(i + 1) for i in range(opt_config.num_anneals)
]
anneal_steps = jnp.array(anneal_steps)
def lr_schedule(t):
cur_epoch = jnp.minimum(num_anneals,
jnp.sum(t > anneal_steps))
return opt_config.lr_init * jnp.float_power(opt_config.anneal_factor,
cur_epoch)
opt_class = getattr(optax, opt_config.opt_name)
base_opt = opt_class(1., **opt_config.opt_kwargs)
opt = optax.chain(base_opt, optax.scale_by_schedule(lr_schedule))
return opt, num_steps
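# Hedged usage sketch (not part of the original library): a minimal, assumed
# config namespace carrying the fields that make_opt_and_num_steps reads.
if __name__ == '__main__':
    import types
    example_config = types.SimpleNamespace(
        anneal_lengths='',   # empty -> fall back to steps_per_anneal/num_anneals
        num_anneals=2,
        steps_per_anneal=100,
        anneal_factor=0.1,
        lr_init=1e-3,
        opt_name='adam',     # any optax optimizer constructor name
        opt_kwargs={},
    )
    opt, num_steps = make_opt_and_num_steps(example_config)
    print('num_steps:', num_steps)  # 300 = 100 steps * (1 + 2 anneals)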
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
from migrate import exceptions
from migrate.versioning.repository import *
from migrate.versioning.script import *
from migrate.tests import fixture
from datetime import datetime
class TestRepository(fixture.Pathed):
def test_create(self):
"""Repositories are created successfully"""
path = self.tmp_repos()
name = 'repository_name'
# Creating a repository that doesn't exist should succeed
repo = Repository.create(path, name)
config_path = repo.config.path
manage_path = os.path.join(repo.path, 'manage.py')
self.assertTrue(repo)
# Files should actually be created
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.exists(config_path))
self.assertTrue(os.path.exists(manage_path))
# Can't create it again: it already exists
self.assertRaises(exceptions.PathFoundError, Repository.create, path, name)
return path
def test_load(self):
"""We should be able to load information about an existing repository"""
# Create a repository to load
path = self.test_create()
repos = Repository(path)
self.assertTrue(repos)
self.assertTrue(repos.config)
self.assertTrue(repos.config.get('db_settings', 'version_table'))
# version_table's default isn't none
self.assertNotEquals(repos.config.get('db_settings', 'version_table'), 'None')
def test_load_notfound(self):
"""Nonexistant repositories shouldn't be loaded"""
path = self.tmp_repos()
self.assertTrue(not os.path.exists(path))
self.assertRaises(exceptions.InvalidRepositoryError, Repository, path)
def test_load_invalid(self):
"""Invalid repos shouldn't be loaded"""
# Here, invalid=empty directory. There may be other conditions too,
# but we shouldn't need to test all of them
path = self.tmp_repos()
os.mkdir(path)
self.assertRaises(exceptions.InvalidRepositoryError, Repository, path)
class TestVersionedRepository(fixture.Pathed):
"""Tests on an existing repository with a single python script"""
def setUp(self):
super(TestVersionedRepository, self).setUp()
Repository.clear()
self.path_repos = self.tmp_repos()
Repository.create(self.path_repos, 'repository_name')
def test_version(self):
"""We should correctly detect the version of a repository"""
repos = Repository(self.path_repos)
# Get latest version, or detect if a specified version exists
self.assertEqual(repos.latest, 0)
# repos.latest isn't an integer, but a VerNum
# (so we can't just assume the following tests are correct)
self.assertTrue(repos.latest >= 0)
self.assertTrue(repos.latest < 1)
# Create a script and test again
repos.create_script('')
self.assertEqual(repos.latest, 1)
self.assertTrue(repos.latest >= 0)
self.assertTrue(repos.latest >= 1)
self.assertTrue(repos.latest < 2)
# Create a new script and test again
repos.create_script('')
self.assertEqual(repos.latest, 2)
self.assertTrue(repos.latest >= 0)
self.assertTrue(repos.latest >= 1)
self.assertTrue(repos.latest >= 2)
self.assertTrue(repos.latest < 3)
def test_timestamp_numbering_version(self):
repos = Repository(self.path_repos)
repos.config.set('db_settings', 'use_timestamp_numbering', 'True')
# Get latest version, or detect if a specified version exists
self.assertEqual(repos.latest, 0)
# repos.latest isn't an integer, but a VerNum
# (so we can't just assume the following tests are correct)
self.assertTrue(repos.latest >= 0)
self.assertTrue(repos.latest < 1)
# Create a script and test again
now = int(datetime.utcnow().strftime('%Y%m%d%H%M%S'))
repos.create_script('')
print(repos.latest)
self.assertEqual(repos.latest, now)
def test_source(self):
"""Get a script object by version number and view its source"""
# Load repository and commit script
repo = Repository(self.path_repos)
repo.create_script('')
repo.create_script_sql('postgres', 'foo bar')
# Source is valid: script must have an upgrade function
# (not a very thorough test, but should be plenty)
source = repo.version(1).script().source()
self.assertTrue(source.find('def upgrade') >= 0)
import pprint; pprint.pprint(repo.version(2).sql)
source = repo.version(2).script('postgres', 'upgrade').source()
self.assertEqual(source.strip(), '')
def test_latestversion(self):
"""Repository.version() (no params) returns the latest version"""
repos = Repository(self.path_repos)
repos.create_script('')
self.assertTrue(repos.version(repos.latest) is repos.version())
self.assertTrue(repos.version() is not None)
def test_changeset(self):
"""Repositories can create changesets properly"""
# Create a nonzero-version repository of empty scripts
repos = Repository(self.path_repos)
for i in range(10):
repos.create_script('')
def check_changeset(params, length):
"""Creates and verifies a changeset"""
changeset = repos.changeset('postgres', *params)
self.assertEqual(len(changeset), length)
self.assertTrue(isinstance(changeset, Changeset))
uniq = list()
# Changesets are iterable
for version, change in changeset:
self.assertTrue(isinstance(change, BaseScript))
# Changes aren't identical
self.assertTrue(id(change) not in uniq)
uniq.append(id(change))
return changeset
# Upgrade to a specified version...
cs = check_changeset((0, 10), 10)
self.assertEqual(cs.keys().pop(0), 0)  # 0 -> 1: index is starting version
self.assertEqual(cs.keys().pop(), 9) # 9 -> 10: index is starting version
self.assertEqual(cs.start, 0) # starting version
self.assertEqual(cs.end, 10) # ending version
check_changeset((0, 1), 1)
check_changeset((0, 5), 5)
check_changeset((0, 0), 0)
check_changeset((5, 5), 0)
check_changeset((10, 10), 0)
check_changeset((5, 10), 5)
# Can't request a changeset of higher version than this repository
self.assertRaises(Exception, repos.changeset, 'postgres', 5, 11)
self.assertRaises(Exception, repos.changeset, 'postgres', -1, 5)
# Upgrade to the latest version...
cs = check_changeset((0,), 10)
self.assertEqual(cs.keys().pop(0), 0)
self.assertEqual(cs.keys().pop(), 9)
self.assertEqual(cs.start, 0)
self.assertEqual(cs.end, 10)
check_changeset((1,), 9)
check_changeset((5,), 5)
check_changeset((9,), 1)
check_changeset((10,), 0)
# run changes
cs.run('postgres', 'upgrade')
# Can't request a changeset of higher/lower version than this repository
self.assertRaises(Exception, repos.changeset, 'postgres', 11)
self.assertRaises(Exception, repos.changeset, 'postgres', -1)
# Downgrade
cs = check_changeset((10, 0), 10)
self.assertEqual(cs.keys().pop(0), 10) # 10 -> 9
self.assertEqual(cs.keys().pop(), 1) # 1 -> 0
self.assertEqual(cs.start, 10)
self.assertEqual(cs.end, 0)
check_changeset((10, 5), 5)
check_changeset((5, 0), 5)
def test_many_versions(self):
"""Test what happens when lots of versions are created"""
repos = Repository(self.path_repos)
for i in range(1001):
repos.create_script('')
# since we normally create 3 digit ones, let's see if we blow up
self.assertTrue(os.path.exists('%s/versions/1000.py' % self.path_repos))
self.assertTrue(os.path.exists('%s/versions/1001.py' % self.path_repos))
# TODO: test manage file
# TODO: test changeset
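# Hedged usage sketch (not part of the test suite): the workflow the tests
# above exercise, condensed and run against a scratch directory.
if __name__ == '__main__':
    import tempfile
    scratch = os.path.join(tempfile.mkdtemp(), 'example_repo')
    repo = Repository.create(scratch, 'example_repository')
    repo.create_script('')                         # becomes version 1
    repo.create_script_sql('postgres', 'foo bar')  # becomes version 2
    print(repo.latest)
    print(len(repo.changeset('postgres', 0)))      # changes from version 0 to latest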
|
|
import logging
from fraud_detection.settings import FILE_DIRS
logger = logging.getLogger(__name__)
class DuplicateEdgeError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class NodeDoesntExistError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Graph(object):
def __init__(self, structure=None):
if structure is None:
structure = dict()
self.__structure = structure
def __node(self, *args):
for n in args:
if not self.node_exist(n):
self.__structure[n] = set()
def add_node(self, **kwargs):
"""
Add new node(s) to the graph
:param kwargs: either 'node' (a single node) or 'nodes' (an iterable of nodes)
:return: None
"""
logger.info("Adding new node on graph")
if 'nodes' in kwargs:
nodes = list(kwargs.get('nodes'))
self.__node(*nodes)
elif 'node' in kwargs:
self.__node(kwargs.get('node'))
def __edge(self, e):
"""
:param e: An edge composed of two nodes
:return:
"""
if self.node_exist(e[0]) and self.node_exist(e[1]) and not self.edge_exist(e) and e[0] != e[1]:
self.__structure[e[0]].add(e[1])
self.__structure[e[1]].add(e[0])
elif self.edge_exist(e):
logger.error("Edge already exist")
raise DuplicateEdgeError("Edge already exist")
else:
logger.error("Node doesn't exist")
raise NodeDoesntExistError("Node(s) doesn't exist")
def add_edge(self, **kwargs):
"""
Connect two nodes with an edge
:param kwargs: either 'edge' (a single edge tuple) or 'edges' (an iterable of edges)
:return: None
"""
logger.info("Adding new edge on graph")
if 'edge' in kwargs:
e = kwargs.get('edge')
self.__edge(e)
elif 'edges' in kwargs:
for e in kwargs.get('edges'):
self.__edge(e)
def node_exist(self, n):
"""
Check if a node already exists in the graph
:param n: A node
:return: True if the node already exists
"""
logger.info("Checking if node already exist")
return self.__structure.get(n) is not None
def edge_exist(self, e):
"""
Test if an edge exists in the graph
:param e: Tuple representing an edge
:return: True if the edge exists, False otherwise
"""
logger.info("Checking if edge exist (if two nodes already connected)")
return self.is_connected(e[0], e[1])
def get_all_node(self):
"""
List all nodes
:return: A list representing all nodes on graph
"""
return list(self.__structure.keys())
def is_connected(self, n1, n2):
"""
Base function used to check whether an edge exists, and as a building
block for the same-network-collision check
:param n1: node 1
:param n2: node 2
:return: True if the nodes are connected, False otherwise
"""
logger.info("Checking if two nodes are connected")
e1 = self.__structure.get(n1)
e2 = self.__structure.get(n2)
# Guard against nodes that are not present in the graph at all
if e1 is None or e2 is None:
return False
return n1 in e2 and n2 in e1
def same_network(self, e):
"""
To answer whether two nodes are in the same network collision we do not
need to know the whole network, only whether the nodes are connected or
share a common neighbour
:param e: A tuple representing an edge of graph
:return: True if two nodes are in the same network collision
"""
logger.info("Checking if two nodes are in same network collision")
if self.node_exist(e[0]) and self.node_exist(e[1]):
if self.is_connected(e[0], e[1]):
return True
else:
return UtilsService.any(self.__structure[e[1]], lambda x: x in self.__structure[e[0]])
else:
logger.error("Node doesn't exist")
raise NodeDoesntExistError("Node doesn't exist")
def __str__(self):
return str(self.__structure)
class UtilsService(object):
@staticmethod
def load_graph():
logger.info("Load file")
nodes = list()
edges = list()
graph = Graph()
with open(FILE_DIRS, "r") as f:
lines = f.readlines()
for line in lines:
l = line.split(' ')
nodes.extend(map(lambda x: int(x), l))
edges.append(tuple([int(l[0]), int(l[1])]))
graph.add_node(nodes=list(set(nodes)))
graph.add_edge(edges=edges)
return graph
@staticmethod
def store_graph(edge):
logger.info("Wrinting new edge on graph")
with open(FILE_DIRS, "a+") as f:
f.write("\n{0} {1}".format(edge[0], edge[1]))
@staticmethod
def any(iterable, predicate):
for item in iterable:
if predicate(item):
return True
return False
@staticmethod
def all(iterable, predicate):
# True only if the predicate holds for every element of the iterable
for item in iterable:
if not predicate(item):
return False
return True
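# Hedged usage sketch (not part of the original module): exercising the Graph
# API directly, without the FILE_DIRS-backed load/store helpers.
if __name__ == '__main__':
    g = Graph()
    g.add_node(nodes=[1, 2, 3])
    g.add_edge(edge=(1, 2))
    print(g.node_exist(3))         # True
    print(g.edge_exist((1, 2)))    # True
    print(g.same_network((2, 3)))  # False: 2 and 3 are not connected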
|
|
'''
Created on 10 Jul 2013
@author: jamie@botsofbitcoin.com
'''
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2013, LocalBitcoins Oy
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the LocalBitcoins Oy nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LOCALBITCOINS OY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import requests
import json
import logging
logging.basicConfig(level=logging.DEBUG)
try:
from bs4 import BeautifulSoup
except ImportError:
logging.warning("BeautifulSoup is required for editing ads using the unofficial HTML API")
hdr = {'Referer' : 'https://localbitcoins.com/'}
class LocalBitcoinsAPI():
def __init__(self, client_id=None, client_secret=None, username=None, password=None):
''' Set up your API Access Information
https://www.okpay.com/en/developers/interfaces/setup.html '''
if client_id == None:
self.client_id = "your details here"
else:
self.client_id = client_id
if client_secret == None:
self.client_secret = "your details here"
else:
self.client_secret = client_secret
if username == None:
self.username = "your details here"
else:
self.username = username
if password == None:
self.password = "your details here"
else:
self.password = password
print "Got creds"
print "Getting API session"
self.access_token = self.get_access_token()
print "Logged on to API session"
self.agent = requests.session()
self.agent_login()
self.csrftoken = self.agent.cookies['csrftoken']
print "Logged on to HTML session"
def get_access_token(self):
try:
logging.debug("Getting stored access token")
token_file = open("localbitcoins_token%s.txt" % self.username, "rb")
access_token = token_file.read()
logging.debug("Got stored access token")
return access_token
except IOError:
logging.debug("Getting new access token")
pass
token_response = requests.post(
"https://localbitcoins.com/oauth2/access_token/",
data={
"grant_type": "password",
"client_id": self.client_id,
"client_secret": self.client_secret,
"username": self.username,
"password": self.password,
"scope": "read+write"}).json()
logging.debug("Posted to oauth2 url")
print "Response", token_response
if "access_token" not in token_response:
logging.fatal("No key in response")
exit(1)
access_token = token_response["access_token"]
logging.debug("Got new access token")
with open("localbitcoins_token%s.txt" % self.username, "wb") as f:
f.write(access_token)
logging.debug("Saved access token.")
return access_token
def get_escrows(self):
logging.debug("Getting escrows")
r = requests.post(
'https://localbitcoins.com/api/escrows/',
data={'access_token': self.access_token})
return json.loads(r.text)
def release_escrow(self, release_url=None, escrow=None):
logging.debug('Releasing escrow')
if release_url is not None:
pass
elif escrow is not None:
release_url = escrow['actions']['release_url']
r = requests.post(release_url,
data={'access_token': self.access_token})
response = json.loads(r.text)
return response
def get_ads(self):
logging.debug('Getting ads')
r = requests.get(
'https://localbitcoins.com/api/ads/',
params={'access_token': self.access_token})
return json.loads(r.text)
def edit_ad(self, ad_id, visibility, min_amount, max_amount, price_equation):
logging.debug('Editing ad')
r = requests.get(
'https://localbitcoins.com/api/ad/%s/' % ad_id,
params={'visibility': visibility,
'min_amount': min_amount,
'max_amount': max_amount,
'price_equation': price_equation})
return r.text
def update_prices(self, price_equation, trade_type):
''' Pass in a price equation and type of ad you want it to apply to
and all your ads of that type will be updated. Returns a list of
responses to the edits '''
logging.debug('Updating prices')
ads = self.get_ads()['data']['ad_list']
response = []
for ad in ads:
data = ad['data']
if data['trade_type'] == trade_type:
response += [self.edit_ad(data['ad_id'], data['visibility'],
data['min_amount'], data['max_amount'],
data['price_equation'])]
return response
def agent_login(self):
''' Added function to log in allowing access to additional functions
not yet covered by the official API. These functions will be deprecated once
the official API covers them. '''
self.agent.get('https://localbitcoins.com/', verify=False)
csrftoken = self.agent.cookies['csrftoken']
self.agent.post('https://localbitcoins.com/accounts/login/',
data={'username': self.username,
'password': self.password,
'csrfmiddlewaretoken' : csrftoken},
headers=hdr, verify=False)
def delete_ads(self, start=0, end='inf'):
''' Unofficial API function '''
ads = self.get_ads()['data']['ad_list']
delete_ids = [ad['data']['ad_id'] for ad in ads
if ad['data']['ad_id'] >= start
and ad['data']['ad_id'] <= end]
response = []
for ad_id in delete_ids:
response += [self.delete_ad(ad_id)]
return response
def send_message(self, ad_url, message):
''' Unofficial API function '''
logging.debug('Sending message')
try:
post_data = {'msg': message}
post_data['csrfmiddlewaretoken'] = self.csrftoken
self.agent.post(ad_url, data=post_data, headers=hdr)
response = {'success': 1}
except Exception as e:
response = {'success': 0, 'error': e}
return response
def delete_ad(self, ad_id):
''' Unofficial API function '''
logging.debug('Deleting ad')
try:
r = self.agent.get(
'https://localbitcoins.com/ads_delete/%s' % ad_id,
headers=hdr)
if "alert alert-success" in r.text:
response = {'success': 1, 'deleted_id': ad_id}
else:
response = {'success': 0}
except Exception as e:
response = {'success': 0, 'error': e}
return response
def clone_ad_html(self, ad_id, ad_trade_type, ad_online_provider, edits_dict=None):
''' Unofficial API function
Requires valid online provider and trade type parameters as they
are not present in the cloned ad'''
logging.debug('Cloning ad')
ad_url = 'https://localbitcoins.com/ads_edit/%s' % ad_id
ad = self.agent.get(ad_url, headers=hdr).text
soup = BeautifulSoup(ad)
post_data = _get_post_data(soup)
post_data = dict(post_data.items() + edits_dict.items())
post_data['csrfmiddlewaretoken'] = self.csrftoken
post_data['submit'] = 'Publish advertisement'
post_data['ad-trade_type'] = ad_trade_type
post_data['ad-online_provider'] = ad_online_provider
post_data['ad-contact_hours'] = post_data['ad-msg'][0].replace('Contact hours: ', '')
logging.debug(post_data)
new_ad_url = 'https://localbitcoins.com/advertise/'
try:
r = self.agent.post(new_ad_url, data=post_data, headers=hdr)
if "error" in r.text:
response = {'success': 0, 'error': 'Failed to clone ad'}
else:
response = {'ad_id': r.url.split('/')[-2]}
response['success'] = 1
except Exception as e:
response = {'success': 0, 'error': e}
return response
def edit_ad_html(self, ad_no, edits_dict=None):
''' Unofficial API function '''
logging.debug('Editing ad')
ad_url = 'https://localbitcoins.com/ads_edit/%s' % ad_no
ad = self.agent.get(ad_url, headers=hdr).text
soup = BeautifulSoup(ad)
post_data = _get_post_data(soup)
post_data = dict(post_data.items() + edits_dict.items())
try:
post_data['csrfmiddlewaretoken'] = self.csrftoken
r = self.agent.post(ad_url, data=post_data, headers=hdr)
if "alert alert-success" in r.text:
response = {'success': 1, 'edited_ad': ad_url}
elif "error" in r.text:
response = {'success': 0, 'error': 'Failed to upload ad'}
except Exception as e:
response = {'success': 0, 'error': e}
return response
def _get_post_data(soup):
inputs = soup.find_all(_required_inputs)
inputs_dict = {tag.get('name'): tag.get('value') for tag in inputs}
controls = soup.find_all('select')
controls_dict = _add_controls(controls)
text = soup.find_all('textarea')
text_dict = _add_text(text)
post_data = inputs_dict
post_data = dict(post_data.items() + controls_dict.items())
post_data = dict(post_data.items() + text_dict.items())
return post_data
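# _required_inputs below is a BeautifulSoup tag filter: it keeps the <input>
# elements whose values should be re-posted when cloning or editing an ad, and
# drops hidden bookkeeping fields (csrf token, action_url, header, msg) as well
# as unchecked radio and checkbox inputs.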
def _required_inputs(tag):
discard_tags = ['csrfmiddlewaretoken', 'action_url', 'header', 'msg']
if tag.get('type') == 'hidden' and tag.get('name') in discard_tags:
return
if tag.get('type') == 'radio' and tag.get('checked') != 'checked':
return
elif tag.get('type') == 'checkbox' and tag.get('checked') != 'checked':
return
else:
return tag.name == 'input'
def _add_controls(controls):
controls_dict = {}
for c in controls:
if c['name'] == 'ad-online_provider':
controls_dict[c['name']] = c.find_all('option')[0]['value']
else:
controls_dict[c['name']] = c.find_all('option',
attrs={'selected': "selected"})[0]['value']
return controls_dict
def _add_text(text):
text_dict = {}
for t in text:
text_dict[t['name']] = (t.contents if len(t.contents) > 0 else "")
return text_dict
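# Hedged usage sketch (not part of the original module; the field names below
# are made up): _get_post_data scrapes the form values to re-post from an ad page.
if __name__ == '__main__':
    sample_form = """
    <form>
      <input type="hidden" name="csrfmiddlewaretoken" value="secret"/>
      <input type="text" name="ad-min_amount" value="10"/>
      <input type="radio" name="ad-currency" value="EUR" checked="checked"/>
      <select name="ad-online_provider"><option value="NATIONAL_BANK">Bank</option></select>
      <textarea name="ad-msg">Contact hours: 9-17</textarea>
    </form>"""
    print(_get_post_data(BeautifulSoup(sample_form)))
    # csrf token is dropped; text input, checked radio, select and textarea are kept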
|
|
#!/usr/bin/env python
"""Tests for the flow."""
import time
from grr.client import actions
from grr.client import vfs
from grr.lib import access_control
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flags
from grr.lib import flow
from grr.lib import flow_runner
from grr.lib import output_plugin
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import type_info
from grr.lib import utils
# For GetClientStats. pylint: disable=unused-import
from grr.lib.flows.general import administrative
# pylint: enable=unused-import
from grr.lib.flows.general import transfer
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import tests_pb2
# pylint: mode=test
class FlowResponseSerialization(flow.GRRFlow):
"""Demonstrate saving responses in the flow."""
@flow.StateHandler(next_state="Response1")
def Start(self, unused_message=None):
self.CallClient("ReturnBlob",
rdf_client.EchoRequest(data="test"),
next_state="Response1")
@flow.StateHandler(next_state="Response2")
def Response1(self, messages):
"""Record the message id for testing."""
self.state.Register("messages", messages)
self.CallClient("ReturnBlob",
rdf_client.EchoRequest(data="test"),
next_state="Response2")
@flow.StateHandler()
def Response2(self, messages):
# We need to receive one response and it must be the same as that stored in
# the previous state.
if (len(list(messages)) != 1 or
messages.status.status != rdf_flows.GrrStatus.ReturnedStatus.OK or
list(messages) != list(self.state.messages)):
raise RuntimeError("Messages not serialized")
class NoRequestChildFlow(flow.GRRFlow):
"""This flow just returns and does not generate any requests."""
@flow.StateHandler()
def Start(self, unused_message):
return
class CallClientChildFlow(flow.GRRFlow):
"""This flow just returns and does not generate any requests."""
@flow.StateHandler()
def Start(self, unused_message):
self.CallClient("GetClientStats", next_state="End")
class NoRequestParentFlow(flow.GRRFlow):
child_flow = "NoRequestChildFlow"
@flow.StateHandler(next_state="End")
def Start(self, unused_message):
self.CallFlow(self.child_flow, next_state="End")
@flow.StateHandler()
def End(self, unused_message):
pass
class CallClientParentFlow(NoRequestParentFlow):
child_flow = "CallClientChildFlow"
class AdminOnlyChildFlow(CallClientChildFlow):
AUTHORIZED_LABELS = ["admin"]
class AdminOnlyParentFlow(NoRequestParentFlow):
child_flow = "AdminOnlyChildFlow"
class BasicFlowTest(test_lib.FlowTestsBaseclass):
pass
class FlowCreationTest(BasicFlowTest):
"""Test flow creation."""
def testInvalidClientId(self):
"""Should raise if the client_id is invalid."""
self.assertRaises(ValueError, flow.GRRFlow.StartFlow,
client_id="hello", flow_name="FlowOrderTest",
token=self.token)
def testUnknownArg(self):
"""Check that flows reject unknown args."""
self.assertRaises(type_info.UnknownArg, flow.GRRFlow.StartFlow,
client_id=self.client_id, flow_name="FlowOrderTest",
token=self.token, foobar=1)
def testTypeAttributeIsNotAppendedWhenFlowIsClosed(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, mode="rw",
token=self.token)
flow_obj.Close()
flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, token=self.token)
types = list(flow_obj.GetValuesForAttribute(flow_obj.Schema.TYPE))
self.assertEqual(len(types), 1)
def testFlowSerialization(self):
"""Check that we can unpickle flows."""
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, token=self.token)
self.assertEqual(flow_obj.__class__, test_lib.FlowOrderTest)
def testFlowSerialization2(self):
"""Check that we can unpickle flows."""
class TestClientMock(object):
in_rdfvalue = rdf_client.EchoRequest
out_rdfvalue = rdf_protodict.DataBlob
def __init__(self):
# Register us as an action plugin.
actions.ActionPlugin.classes["ReturnBlob"] = self
def ReturnBlob(self, unused_args):
return [rdf_protodict.DataBlob(integer=100)]
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper("FlowResponseSerialization",
TestClientMock(), token=self.token,
client_id=self.client_id):
pass
def testTerminate(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
flow.GRRFlow.TerminateFlow(session_id, token=self.token)
flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, token=self.token)
runner = flow_obj.GetRunner()
self.assertEqual(runner.IsRunning(), False)
self.assertEqual(runner.context.state,
rdf_flows.Flow.State.ERROR)
reason = "no reason"
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
flow.GRRFlow.TerminateFlow(session_id, reason=reason, token=self.token)
flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, token=self.token)
runner = flow_obj.GetRunner()
self.assertEqual(runner.IsRunning(), False)
self.assertEqual(runner.context.state,
rdf_flows.Flow.State.ERROR)
self.assertTrue(reason in runner.context.status)
def testChildTermination(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="CallClientParentFlow",
token=self.token)
# The child URN should be contained within the parent session_id URN.
flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
children = list(flow_obj.ListChildren())
self.assertEqual(len(children), 1)
reason = "just so"
flow.GRRFlow.TerminateFlow(session_id, reason=reason, token=self.token)
flow_obj = aff4.FACTORY.Open(session_id,
aff4_type="CallClientParentFlow",
token=self.token)
runner = flow_obj.GetRunner()
self.assertEqual(runner.IsRunning(), False)
self.assertEqual(runner.context.state,
rdf_flows.Flow.State.ERROR)
self.assertTrue("user test" in runner.context.status)
self.assertTrue(reason in runner.context.status)
child = aff4.FACTORY.Open(children[0],
aff4_type="CallClientChildFlow",
token=self.token)
runner = child.GetRunner()
self.assertEqual(runner.IsRunning(), False)
self.assertEqual(runner.context.state,
rdf_flows.Flow.State.ERROR)
self.assertTrue("user test" in runner.context.status)
self.assertTrue("Parent flow terminated." in runner.context.status)
def testNotification(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
with aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, mode="rw",
token=self.token) as flow_obj:
msg = "Flow terminated due to error"
flow_obj.GetRunner().Notify("FlowStatus", session_id, msg)
user_fd = aff4.FACTORY.Open(rdfvalue.RDFURN("aff4:/users").Add(
self.token.username), mode="r", token=self.token)
notifications = user_fd.ShowNotifications(reset=False)
self.assertEqual(len(notifications), 1)
for notification in notifications:
self.assertTrue(notification.message.endswith(": " + msg))
self.assertEqual(notification.subject, rdfvalue.RDFURN(session_id))
def testFormatstringNotification(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
with aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, mode="rw",
token=self.token) as flow_obj:
runner = flow_obj.GetRunner()
# msg contains %s.
msg = "Flow reading %system% terminated due to error"
runner.Notify("FlowStatus", session_id, msg)
runner.Status(msg)
def testSendRepliesAttribute(self):
# Run the flow in the simulated way. Child's send_replies is set to False.
# Parent flow will raise if number of responses is > 0.
for _ in test_lib.TestFlowHelper(
"ParentFlowWithoutResponses", ClientMock(), client_id=self.client_id,
check_flow_errors=False, token=self.token,):
pass
self.assertEqual(ParentFlowWithoutResponses.success, True)
notifications = {}
def CollectNotifications(self, queue, notifications, **kwargs):
now = time.time()
for notification in notifications:
self.notifications.setdefault(notification.session_id, []).append(now)
self.old_notify(queue, notifications, **kwargs)
def testNoRequestChildFlowRace(self):
manager = queue_manager.QueueManager(token=self.token)
self.old_notify = manager._MultiNotifyQueue
with utils.Stubber(queue_manager.QueueManager, "_MultiNotifyQueue",
self.CollectNotifications):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="NoRequestParentFlow",
token=self.token)
self.assertIn(session_id, self.notifications)
f = aff4.FACTORY.Open(session_id, token=self.token)
# Check that the first notification came in after the flow was created.
self.assertLess(int(f.Get(f.Schema.TYPE).age),
1e6 * min(self.notifications[session_id]),
"There was a notification for a flow before "
"the flow was created.")
def testCallClientChildFlowRace(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="CallClientParentFlow",
token=self.token)
client_requests = data_store.DB.ResolveRegex(
self.client_id.Queue(), "task:.*", token=self.token)
self.assertEqual(len(client_requests), 1)
f = aff4.FACTORY.Open(session_id, token=self.token)
for (_, _, timestamp) in client_requests:
# Check that the client request was written after the flow was created.
self.assertLess(int(f.Get(f.Schema.TYPE).age), timestamp,
"The client request was issued before "
"the flow was created.")
def testFlowLogging(self):
"""Check that flows log correctly."""
flow_urn = None
for session_id in test_lib.TestFlowHelper("DummyLogFlow",
action_mocks.ActionMock(),
token=self.token,
client_id=self.client_id):
flow_urn = session_id
with aff4.FACTORY.Open(flow_urn.Add("Logs"), age=aff4.ALL_TIMES,
token=self.token) as log_collection:
count = 0
# Can't use len with PackedVersionCollection
for log in log_collection:
self.assertEqual(log.client_id, self.client_id)
self.assertTrue(log.log_message in ["First", "Second", "Third",
"Fourth", "Uno", "Dos", "Tres",
"Cuatro"])
self.assertTrue(log.flow_name in ["DummyLogFlow",
"DummyLogFlowChild"])
self.assertTrue(str(flow_urn) in str(log.urn))
count += 1
self.assertEqual(count, 8)
class FlowTest(BasicFlowTest):
"""Tests the Flow."""
def testBrokenFlow(self):
"""Check that flows which call to incorrect states raise."""
client_mock = action_mocks.ActionMock("ReadBuffer")
with self.assertRaises(RuntimeError):
for _ in test_lib.TestFlowHelper(
"BrokenFlow", client_mock, client_id=self.client_id,
check_flow_errors=True, token=self.token):
pass
def SendMessages(self, response_ids, session_id, authenticated=True,
args_rdf_name="DataBlob"):
"""Send messages to the flow."""
for response_id in response_ids:
message = rdf_flows.GrrMessage(
request_id=1,
response_id=response_id,
session_id=session_id,
args_rdf_name=args_rdf_name)
if authenticated:
auth_state = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
message.auth_state = auth_state
self.SendMessage(message)
def SendMessage(self, message):
# Now messages are set in the data store
with queue_manager.QueueManager(token=self.token) as manager:
manager.QueueResponse(message.session_id, message)
def SendOKStatus(self, response_id, session_id):
"""Send a message to the flow."""
message = rdf_flows.GrrMessage(
request_id=1,
response_id=response_id,
session_id=session_id,
type=rdf_flows.GrrMessage.Type.STATUS,
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
message.payload = status
self.SendMessage(message)
# Now also set the state on the RequestState
request_state, _ = data_store.DB.Resolve(
message.session_id.Add("state"),
queue_manager.QueueManager.FLOW_REQUEST_TEMPLATE % message.request_id,
token=self.token)
request_state = rdf_flows.RequestState(request_state)
request_state.status = status
data_store.DB.Set(
message.session_id.Add("state"),
queue_manager.QueueManager.FLOW_REQUEST_TEMPLATE % message.request_id,
request_state, token=self.token)
return message
def testReordering(self):
"""Check that out of order client messages are reordered."""
flow_obj = self.FlowSetup("FlowOrderTest")
    # Simulate processing messages arriving in random order
message_ids = [2, 1, 4, 3, 5]
self.SendMessages(message_ids, flow_obj.session_id)
# Send the status message
message = self.SendOKStatus(6, flow_obj.session_id)
runner = flow_runner.FlowRunner(flow_obj)
notification = rdf_flows.Notification(
timestamp=rdfvalue.RDFDatetime().Now())
runner.ProcessCompletedRequests(notification, [message])
# Check that the messages were processed in order
self.assertEqual(flow_obj.messages, [1, 2, 3, 4, 5])
def testCallClient(self):
"""Flows can send client messages using CallClient()."""
flow_obj = self.FlowSetup("FlowOrderTest")
# Check that a message went out to the client
manager = queue_manager.QueueManager(token=self.token)
tasks = manager.Query(self.client_id, limit=100)
self.assertEqual(len(tasks), 1)
message = tasks[0]
self.assertEqual(message.session_id, flow_obj.session_id)
self.assertEqual(message.request_id, 1)
self.assertEqual(message.name, "Test")
def testCallClientWellKnown(self):
"""Well known flows can also call the client."""
cls = flow.GRRFlow.classes["GetClientStatsAuto"]
flow_obj = cls(cls.well_known_session_id, mode="rw", token=self.token)
flow_obj.CallClient(self.client_id, "GetClientStats")
# Check that a message went out to the client
manager = queue_manager.QueueManager(token=self.token)
tasks = manager.Query(self.client_id, limit=100)
self.assertEqual(len(tasks), 1)
message = tasks[0]
# If we don't specify where to send the replies, they go to the devnull flow
devnull = flow.GRRFlow.classes["IgnoreResponses"]
self.assertEqual(message.session_id, devnull.well_known_session_id)
self.assertEqual(message.request_id, 0)
self.assertEqual(message.name, "GetClientStats")
messages = []
def StoreMessage(_, msg):
messages.append(msg)
with utils.Stubber(devnull, "ProcessMessage", StoreMessage):
client_mock = action_mocks.ActionMock("GetClientStats")
for _ in test_lib.TestFlowHelper(
"ClientActionRunner", client_mock, client_id=self.client_id,
action="GetClientStats", token=self.token):
pass
# Make sure the messages arrived.
self.assertEqual(len(messages), 1)
def testAuthentication1(self):
"""Test that flows refuse to processes unauthenticated messages."""
flow_obj = self.FlowSetup("FlowOrderTest")
    # Simulate processing messages arriving in random order
message_ids = [2, 1, 4, 3, 5]
self.SendMessages(message_ids, flow_obj.session_id,
authenticated=False)
# Send the status message
message = self.SendOKStatus(6, flow_obj.session_id)
runner = flow_runner.FlowRunner(flow_obj)
notification = rdf_flows.Notification(
timestamp=rdfvalue.RDFDatetime().Now())
runner.ProcessCompletedRequests(notification, [message])
    # None of the unauthenticated messages should have been processed.
self.assertEqual(flow_obj.messages, [])
def testAuthentication2(self):
"""Test that flows refuse to processes unauthenticated messages.
Here we try to simulate an attacker injecting unauthenticated
messages midstream.
The current implementation actually fails to process the entire
flow since the injected messages displace the real ones if they
arrive earlier. This can be an effective DoS against legitimate
clients but would require attackers to guess session ids.
"""
flow_obj = self.FlowSetup("FlowOrderTest")
    # Simulate processing messages arriving in random order
message_ids = [1, 2]
self.SendMessages(message_ids, flow_obj.session_id,
authenticated=True)
# Now suppose some of the messages are spoofed
message_ids = [3, 4, 5]
self.SendMessages(message_ids, flow_obj.session_id,
authenticated=False)
# And now our real messages arrive
message_ids = [5, 6]
self.SendMessages(message_ids, flow_obj.session_id,
authenticated=True)
# Send the status message
message = self.SendOKStatus(7, flow_obj.session_id)
runner = flow_runner.FlowRunner(flow_obj)
notification = rdf_flows.Notification(
timestamp=rdfvalue.RDFDatetime().Now())
runner.ProcessCompletedRequests(notification, [message])
# Some messages should actually be processed
self.assertEqual(flow_obj.messages, [1, 2, 5, 6])
def testWellKnownFlows(self):
"""Test the well known flows."""
test_flow = self.FlowSetup("WellKnownSessionTest")
# Make sure the session ID is well known
self.assertEqual(test_flow.session_id,
test_lib.WellKnownSessionTest.well_known_session_id)
# Messages to Well Known flows can be unauthenticated
messages = [
rdf_flows.GrrMessage(payload=rdfvalue.RDFInteger(i)) for i in range(10)]
for message in messages:
test_flow.ProcessMessage(message)
# The messages might be processed in arbitrary order
test_flow.messages.sort()
# Make sure that messages were processed even without a status
# message to complete the transaction (Well known flows do not
# have transactions or states - all messages always get to the
# ProcessMessage method):
self.assertEqual(test_flow.messages, range(10))
def testArgParsing(self):
"""Test that arguments can be extracted and annotated successfully."""
# Should raise on parsing default.
self.assertRaises(type_info.TypeValueError, flow.GRRFlow.StartFlow,
client_id=self.client_id, flow_name="BadArgsFlow1",
arg1=False, token=self.token)
# Should not raise now if we provide the correct type.
flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="BadArgsFlow1",
arg1=rdf_paths.PathSpec(), token=self.token)
class FlowTerminationTest(BasicFlowTest):
"""Flow termination-related tests."""
def testFlowMarkedForTerminationTerminatesInStateHandler(self):
flow_obj = self.FlowSetup("FlowOrderTest")
flow.GRRFlow.MarkForTermination(flow_obj.urn, reason="because i can",
token=self.token)
def ProcessFlow():
for _ in test_lib.TestFlowHelper(
flow_obj.urn, client_id=self.client_id, token=self.token):
pass
self.assertRaisesRegexp(RuntimeError, "because i can", ProcessFlow)
class DummyFlowOutputPlugin(output_plugin.OutputPluginWithOutputStreams):
"""Dummy plugin that opens a dummy stream."""
num_calls = 0
num_responses = 0
def ProcessResponses(self, responses):
stream = self._CreateOutputStream("dummy")
stream.Write("dummy")
stream.Flush()
DummyFlowOutputPlugin.num_calls += 1
DummyFlowOutputPlugin.num_responses += len(list(responses))
class FailingDummyFlowOutputPlugin(output_plugin.OutputPlugin):
def ProcessResponses(self, unused_responses):
raise RuntimeError("Oh no!")
class LongRunningDummyFlowOutputPlugin(output_plugin.OutputPlugin):
num_calls = 0
def ProcessResponses(self, unused_responses):
LongRunningDummyFlowOutputPlugin.num_calls += 1
time.time = lambda: 100
class FlowOutputPluginsTest(BasicFlowTest):
def setUp(self):
super(FlowOutputPluginsTest, self).setUp()
DummyFlowOutputPlugin.num_calls = 0
DummyFlowOutputPlugin.num_responses = 0
def RunFlow(self, flow_name=None, plugins=None, flow_args=None,
client_mock=None):
runner_args = flow_runner.FlowRunnerArgs(flow_name=flow_name or "GetFile",
output_plugins=plugins)
if flow_args is None:
flow_args = transfer.GetFileArgs(
pathspec=rdf_paths.PathSpec(
path="/tmp/evil.txt",
pathtype=rdf_paths.PathSpec.PathType.OS))
if client_mock is None:
client_mock = test_lib.SampleHuntMock()
flow_urn = flow.GRRFlow.StartFlow(client_id=self.client_id,
args=flow_args,
runner_args=runner_args,
token=self.token)
for _ in test_lib.TestFlowHelper(flow_urn, client_mock=client_mock,
client_id=self.client_id,
token=self.token):
pass
return flow_urn
def testFlowWithoutOutputPluginsCompletes(self):
self.RunFlow()
def testFlowWithOutputPluginButWithoutResultsCompletes(self):
self.RunFlow(
flow_name="NoRequestParentFlow",
plugins=output_plugin.OutputPluginDescriptor(
plugin_name="DummyFlowOutputPlugin"))
self.assertEqual(DummyFlowOutputPlugin.num_calls, 0)
def testFlowWithOutputPluginProcessesResultsSuccessfully(self):
self.RunFlow(
plugins=output_plugin.OutputPluginDescriptor(
plugin_name="DummyFlowOutputPlugin"))
self.assertEqual(DummyFlowOutputPlugin.num_calls, 1)
self.assertEqual(DummyFlowOutputPlugin.num_responses, 1)
def testFlowLogsSuccessfulOutputPluginProcessing(self):
flow_urn = self.RunFlow(
plugins=output_plugin.OutputPluginDescriptor(
plugin_name="DummyFlowOutputPlugin"))
flow_obj = aff4.FACTORY.Open(flow_urn, token=self.token)
log_messages = [item.log_message for item in flow_obj.GetLog()]
self.assertTrue(
"Plugin DummyFlowOutputPlugin sucessfully processed 1 flow replies."
in log_messages)
def testFlowLogsFailedOutputPluginProcessing(self):
flow_urn = self.RunFlow(
plugins=output_plugin.OutputPluginDescriptor(
plugin_name="FailingDummyFlowOutputPlugin"))
flow_obj = aff4.FACTORY.Open(flow_urn, token=self.token)
log_messages = [item.log_message for item in flow_obj.GetLog()]
self.assertTrue(
"Plugin FailingDummyFlowOutputPlugin failed to process 1 replies "
"due to: Oh no!" in log_messages)
def testFlowDoesNotFailWhenOutputPluginFails(self):
flow_urn = self.RunFlow(
plugins=output_plugin.OutputPluginDescriptor(
plugin_name="FailingDummyFlowOutputPlugin"))
flow_obj = aff4.FACTORY.Open(flow_urn, token=self.token)
self.assertEqual(flow_obj.state.context.state, "TERMINATED")
def testFailingPluginDoesNotImpactOtherPlugins(self):
self.RunFlow(
plugins=[
output_plugin.OutputPluginDescriptor(
plugin_name="FailingDummyFlowOutputPlugin"),
output_plugin.OutputPluginDescriptor(
plugin_name="DummyFlowOutputPlugin")])
self.assertEqual(DummyFlowOutputPlugin.num_calls, 1)
self.assertEqual(DummyFlowOutputPlugin.num_responses, 1)
class NoClientListener(flow.EventListener): # pylint: disable=unused-variable
well_known_session_id = rdfvalue.SessionID(flow_name="test2")
EVENTS = ["TestEvent"]
received_events = []
@flow.EventHandler(auth_required=True)
def ProcessMessage(self, message=None, event=None):
# Store the results for later inspection.
self.__class__.received_events.append((message, event))
class ClientListener(flow.EventListener):
well_known_session_id = rdfvalue.SessionID(flow_name="test3")
EVENTS = ["TestEvent"]
received_events = []
@flow.EventHandler(auth_required=True, allow_client_access=True)
def ProcessMessage(self, message=None, event=None):
# Store the results for later inspection.
self.__class__.received_events.append((message, event))
class FlowDoneListener(flow.EventListener):
well_known_session_id = rdfvalue.SessionID(queue=rdfvalue.RDFURN("EV"),
flow_name="FlowDone")
EVENTS = ["Not used"]
received_events = []
@flow.EventHandler(auth_required=True)
def ProcessMessage(self, message=None, event=None):
_ = event
# Store the results for later inspection.
FlowDoneListener.received_events.append(message)
class GeneralFlowsTest(BasicFlowTest):
"""Tests some flows."""
def testCallState(self):
"""Test the ability to chain flows."""
CallStateFlow.success = False
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper("CallStateFlow", ClientMock(),
client_id=self.client_id,
token=self.token):
pass
self.assertEqual(CallStateFlow.success, True)
def Work(self, client_mock, worker_mock):
while True:
client_processed = client_mock.Next()
flows_run = []
for flow_run in worker_mock.Next():
flows_run.append(flow_run)
if client_processed == 0 and not flows_run:
break
def testDelayedCallState(self):
"""Tests the ability to delay a CallState invocation."""
with test_lib.FakeTime(10000):
client_mock = ClientMock()
client_mock = test_lib.MockClient(self.client_id, client_mock,
token=self.token)
worker_mock = test_lib.MockWorker(check_flow_errors=True,
token=self.token)
flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="DelayedCallStateFlow",
token=self.token)
self.Work(client_mock, worker_mock)
# We should have done the first CallState so far.
self.assertEqual(DelayedCallStateFlow.flow_ran, 1)
with test_lib.FakeTime(10050):
# 50 seconds more is not enough.
self.Work(client_mock, worker_mock)
self.assertEqual(DelayedCallStateFlow.flow_ran, 1)
with test_lib.FakeTime(10100):
# But 100 is.
self.Work(client_mock, worker_mock)
self.assertEqual(DelayedCallStateFlow.flow_ran, 2)
def testChainedFlow(self):
"""Test the ability to chain flows."""
ParentFlow.success = False
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper("ParentFlow", ClientMock(),
client_id=self.client_id,
token=self.token):
pass
self.assertEqual(ParentFlow.success, True)
def testCreatorPropagation(self):
# Instantiate the flow using one username.
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="ParentFlow", sync=False,
token=access_control.ACLToken(username="original_user",
reason="testing"))
# Run the flow using another user ("test").
for _ in test_lib.TestFlowHelper(session_id, ClientMock(),
client_id=self.client_id,
token=self.token):
pass
self.assertEqual(ParentFlow.success, True)
subflows = list(aff4.FACTORY.Open(
session_id, token=self.token).ListChildren())
self.assertEqual(len(subflows), 1)
child_flow = aff4.FACTORY.Open(subflows[0], token=self.token)
self.assertEqual(child_flow.GetRunner().context.creator, "original_user")
def testFlowLabelChecking(self):
self.CreateUser("noadmin")
noadmin_token = access_control.ACLToken(username="noadmin",
reason="testing")
with self.assertRaises(access_control.UnauthorizedAccess):
for _ in test_lib.TestFlowHelper("AdminOnlyChildFlow", ClientMock(),
client_id=self.client_id,
token=noadmin_token, sync=False):
pass
with self.assertRaises(RuntimeError):
for _ in test_lib.TestFlowHelper("AdminOnlyParentFlow", ClientMock(),
client_id=self.client_id,
token=noadmin_token, sync=False):
pass
self.CreateAdminUser("adminuser")
admin_token = access_control.ACLToken(username="adminuser",
reason="testing")
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="AdminOnlyChildFlow", sync=False,
token=admin_token)
for _ in test_lib.TestFlowHelper(session_id, ClientMock(),
client_id=self.client_id,
token=noadmin_token):
pass
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="AdminOnlyParentFlow", sync=False,
token=admin_token)
for _ in test_lib.TestFlowHelper(session_id, ClientMock(),
client_id=self.client_id,
token=noadmin_token):
pass
def testBrokenChainedFlow(self):
"""Test that exceptions are properly handled in chain flows."""
BrokenParentFlow.success = False
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper(
"BrokenParentFlow", ClientMock(), client_id=self.client_id,
check_flow_errors=False, token=self.token):
pass
self.assertEqual(BrokenParentFlow.success, True)
def testIteratedDirectoryListing(self):
"""Test that the client iterator works."""
with test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS, MockVFSHandler):
path = "/"
# Run the flow in the simulated way
client_mock = action_mocks.ActionMock("IteratedListDirectory")
for _ in test_lib.TestFlowHelper(
"IteratedListDirectory", client_mock, client_id=self.client_id,
pathspec=rdf_paths.PathSpec(path="/",
pathtype=rdf_paths.PathSpec.PathType.OS),
token=self.token):
pass
fd = aff4.FACTORY.Open(self.client_id.Add("fs/os").Add(path),
token=self.token)
directory = [ch for ch in fd.OpenChildren()]
pb = rdf_paths.PathSpec(path=path,
pathtype=rdf_paths.PathSpec.PathType.OS)
directory2 = list(vfs.VFSOpen(pb).ListFiles())
directory.sort()
result = [x.Get(x.Schema.STAT) for x in directory]
# Make sure that the resulting directory is what it should be
for x, y in zip(result, directory2):
x.aff4path = None
self.assertEqual(x.st_mode, y.st_mode)
self.assertRDFValueEqual(x, y)
def testClientEventNotification(self):
"""Make sure that client events handled securely."""
ClientListener.received_events = []
NoClientListener.received_events = []
event = rdf_flows.GrrMessage(
source="C.1395c448a443c7d9",
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
event.payload = rdf_paths.PathSpec(path="foobar")
flow.Events.PublishEvent("TestEvent", event, token=self.token)
test_lib.MockWorker(token=self.token).Simulate()
# The same event should be sent to both listeners, but only the listener
# which accepts client messages should register it.
self.assertRDFValueEqual(ClientListener.received_events[0][0].payload,
event.payload)
self.assertEqual(NoClientListener.received_events, [])
def testFlowNotification(self):
FlowDoneListener.received_events = []
with test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS, MockVFSHandler):
path = rdf_paths.PathSpec(path="/",
pathtype=rdf_paths.PathSpec.PathType.OS)
# Run the flow in the simulated way
client_mock = action_mocks.ActionMock("IteratedListDirectory")
for _ in test_lib.TestFlowHelper(
"IteratedListDirectory", client_mock, client_id=self.client_id,
notification_urn=rdfvalue.SessionID(queue=rdfvalue.RDFURN("EV"),
flow_name="FlowDone"),
pathspec=path, token=self.token):
pass
# The event goes to an external queue so we need another worker.
worker = test_lib.MockWorker(queues=[rdfvalue.RDFURN("EV")],
token=self.token)
worker.Simulate()
self.assertEqual(len(FlowDoneListener.received_events), 1)
flow_event = FlowDoneListener.received_events[0].payload
self.assertEqual(flow_event.flow_name, "IteratedListDirectory")
self.assertEqual(flow_event.client_id, "aff4:/C.1000000000000000")
self.assertEqual(flow_event.status, rdf_flows.FlowNotification.Status.OK)
def testEventNotification(self):
"""Test that events are sent to listeners."""
NoClientListener.received_events = []
worker = test_lib.MockWorker(token=self.token)
event = rdf_flows.GrrMessage(
session_id=rdfvalue.SessionID(flow_name="SomeFlow"),
name="test message",
payload=rdf_paths.PathSpec(path="foobar", pathtype="TSK"),
source="aff4:/C.0000000000000001", auth_state="AUTHENTICATED")
# Not allowed to publish a message from a client..
flow.Events.PublishEvent("TestEvent", event, token=self.token)
worker.Simulate()
self.assertEqual(NoClientListener.received_events, [])
event.source = "Source"
# First make the message unauthenticated.
event.auth_state = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED
# Publish the event.
flow.Events.PublishEvent("TestEvent", event, token=self.token)
worker.Simulate()
# This should not work - the unauthenticated message is dropped.
self.assertEqual(NoClientListener.received_events, [])
# Now make the message authenticated.
event.auth_state = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
# Publish the event.
flow.Events.PublishEvent("TestEvent", event, token=self.token)
worker.Simulate()
# This should now work:
self.assertEqual(len(NoClientListener.received_events), 1)
# Make sure the source is correctly propagated.
self.assertEqual(NoClientListener.received_events[0][0].source,
"aff4:/Source")
self.assertEqual(NoClientListener.received_events[0][1].path, "foobar")
NoClientListener.received_events = []
# Now schedule ten events at the same time.
for i in xrange(10):
event.source = "Source%d" % i
flow.Events.PublishEvent("TestEvent", event, token=self.token)
worker.Simulate()
self.assertEqual(len(NoClientListener.received_events), 10)
# Events do not have to be delivered in order so we sort them here for
# comparison.
NoClientListener.received_events.sort(key=lambda x: x[0].source)
for i in range(10):
self.assertEqual(NoClientListener.received_events[i][0].source,
"aff4:/Source%d" % i)
self.assertEqual(NoClientListener.received_events[i][1].path, "foobar")
def testClientPrioritization(self):
"""Test that flow priorities work on the client side."""
result = []
client_mock = PriorityClientMock(result)
client_mock = test_lib.MockClient(self.client_id, client_mock,
token=self.token)
worker_mock = test_lib.MockWorker(check_flow_errors=True,
token=self.token)
# Start some flows with different priorities.
args = [(rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority"),
(rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority"),
(rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority2"),
(rdf_flows.GrrMessage.Priority.HIGH_PRIORITY, "high priority"),
(rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority2")]
for (priority, msg) in args:
flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="PriorityFlow", msg=msg,
priority=priority, token=self.token)
while True:
client_processed = client_mock.Next()
flows_run = []
for flow_run in worker_mock.Next():
flows_run.append(flow_run)
if client_processed == 0 and not flows_run:
break
# The flows should be run in order of priority.
self.assertEqual(result[0:1],
[u"high priority"])
self.assertEqual(sorted(result[1:3]),
[u"medium priority", u"medium priority2"])
self.assertEqual(sorted(result[3:5]),
[u"low priority", u"low priority2"])
def testWorkerPrioritization(self):
"""Test that flow priorities work on the worker side."""
result = []
client_mock = PriorityClientMock(result)
client_mock = test_lib.MockClient(self.client_id, client_mock,
token=self.token)
worker_mock = test_lib.MockWorker(check_flow_errors=True,
token=self.token)
# Start some flows with different priorities.
args = [(rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority"),
(rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority"),
(rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority2"),
(rdf_flows.GrrMessage.Priority.HIGH_PRIORITY, "high priority"),
(rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority2")]
server_result = []
PriorityFlow.storage = server_result
for (priority, msg) in args:
flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="PriorityFlow", msg=msg,
priority=priority, token=self.token)
while True:
# Run all the clients first so workers have messages to choose from.
client_processed = 1
while client_processed:
client_processed = client_mock.Next()
# Now process the results, this should happen in the correct order.
flows_run = []
for flow_run in worker_mock.Next():
flows_run.append(flow_run)
if not flows_run:
break
# The flows should be run in order of priority.
self.assertEqual(server_result[0:1],
[u"high priority"])
self.assertEqual(sorted(server_result[1:3]),
[u"medium priority", u"medium priority2"])
self.assertEqual(sorted(server_result[3:5]),
[u"low priority", u"low priority2"])
class ResourcedWorker(test_lib.MockWorker):
USER_CPU = [1, 20, 5, 16]
SYSTEM_CPU = [4, 20, 2, 8]
NETWORK_BYTES = [180, 1000, 580, 2000]
class FlowLimitTests(BasicFlowTest):
def RunFlow(self, flow_name, **kwargs):
result = {}
client_mock = CPULimitClientMock(result)
client_mock = test_lib.MockClient(self.client_id, client_mock,
token=self.token)
worker_mock = ResourcedWorker(check_flow_errors=True,
token=self.token)
flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name=flow_name,
token=self.token, **kwargs)
while True:
client_processed = client_mock.Next()
flows_run = []
for flow_run in worker_mock.Next():
flows_run.append(flow_run)
if client_processed == 0 and not flows_run:
break
return result
def testNetworkLimit(self):
"""Tests that the network limit works."""
result = self.RunFlow("NetworkLimitFlow", network_bytes_limit=10000)
self.assertEqual(result["networklimit"], [10000, 9820, 8820, 8240])
def testCPULimit(self):
"""Tests that the cpu limit works."""
result = self.RunFlow("CPULimitFlow", cpu_limit=300)
self.assertEqual(result["cpulimit"], [300, 295, 255])
class MockVFSHandler(vfs.VFSHandler):
"""A mock VFS handler with fake files."""
children = []
for x in range(10):
child = rdf_client.StatEntry(pathspec=rdf_paths.PathSpec(
path="Foo%s" % x, pathtype=rdf_paths.PathSpec.PathType.OS))
children.append(child)
supported_pathtype = rdf_paths.PathSpec.PathType.OS
def __init__(self, base_fd, pathspec=None, progress_callback=None,
full_pathspec=None):
super(MockVFSHandler, self).__init__(
base_fd, pathspec=pathspec, progress_callback=progress_callback,
full_pathspec=full_pathspec)
self.pathspec.Append(pathspec)
def ListFiles(self):
return self.children
def IsDirectory(self):
return self.pathspec.path == "/"
class PriorityClientMock(object):
in_rdfvalue = rdf_protodict.DataBlob
def __init__(self, storage):
# Register us as an action plugin.
actions.ActionPlugin.classes["Store"] = self
self.storage = storage
def Store(self, data):
self.storage.append(self.in_rdfvalue(data).string)
return [rdf_protodict.DataBlob(string="Hello World")]
class PriorityFlowArgs(rdf_structs.RDFProtoStruct):
protobuf = tests_pb2.PriorityFlowArgs
class PriorityFlow(flow.GRRFlow):
"""This flow is used to test priorities."""
args_type = PriorityFlowArgs
storage = []
@flow.StateHandler(next_state="Done")
def Start(self):
self.CallClient("Store", string=self.args.msg, next_state="Done")
@flow.StateHandler()
def Done(self, responses):
_ = responses
self.storage.append(self.args.msg)
class CPULimitClientMock(object):
in_rdfvalue = rdf_protodict.DataBlob
def __init__(self, storage):
# Register us as an action plugin.
actions.ActionPlugin.classes["Store"] = self
self.storage = storage
def HandleMessage(self, message):
self.storage.setdefault("cpulimit", []).append(message.cpu_limit)
self.storage.setdefault("networklimit",
[]).append(message.network_bytes_limit)
class CPULimitFlow(flow.GRRFlow):
"""This flow is used to test the cpu limit."""
@flow.StateHandler(next_state="State1")
def Start(self):
self.CallClient("Store", string="Hey!", next_state="State1")
@flow.StateHandler(next_state="State2")
def State1(self):
self.CallClient("Store", string="Hey!", next_state="State2")
@flow.StateHandler(next_state="Done")
def State2(self):
self.CallClient("Store", string="Hey!", next_state="Done")
@flow.StateHandler()
def Done(self, responses):
pass
class NetworkLimitFlow(flow.GRRFlow):
"""This flow is used to test the network bytes limit."""
@flow.StateHandler(next_state="State1")
def Start(self):
self.CallClient("Store", next_state="State1")
@flow.StateHandler(next_state="State2")
def State1(self):
# The mock worker doesn't track usage so we add it here.
self.CallClient("Store", next_state="State2")
@flow.StateHandler(next_state="State3")
def State2(self):
self.CallClient("Store", next_state="State3")
@flow.StateHandler(next_state="Done")
def State3(self):
self.CallClient("Store", next_state="Done")
@flow.StateHandler()
def Done(self, responses):
pass
class ClientMock(object):
"""Mock of client actions."""
in_rdfvalue = None
out_rdfvalue = rdfvalue.RDFString
def __init__(self):
# Register us as an action plugin.
actions.ActionPlugin.classes["ReturnHello"] = self
def ReturnHello(self, _):
return [rdfvalue.RDFString("Hello World")]
class ChildFlow(flow.GRRFlow):
"""This flow will be called by our parent."""
@flow.StateHandler(next_state="ReceiveHello")
def Start(self):
self.CallClient("ReturnHello", next_state="ReceiveHello")
@flow.StateHandler()
def ReceiveHello(self, responses):
# Relay the client's message to our parent
for response in responses:
self.SendReply(rdfvalue.RDFString("Child received"))
self.SendReply(response)
class BrokenChildFlow(ChildFlow):
"""A broken flow which raises."""
@flow.StateHandler()
def ReceiveHello(self, responses):
raise IOError("Boo")
class ParentFlow(flow.GRRFlow):
"""This flow will launch a child flow."""
# This is a global flag which will be set when the flow runs.
success = False
@flow.StateHandler(next_state="ParentReceiveHello")
def Start(self):
# Call the child flow.
self.CallFlow("ChildFlow",
next_state="ParentReceiveHello")
@flow.StateHandler()
def ParentReceiveHello(self, responses):
responses = list(responses)
if (len(responses) != 2 or "Child" not in unicode(responses[0]) or
"Hello" not in unicode(responses[1])):
raise RuntimeError("Messages not passed to parent")
ParentFlow.success = True
class ParentFlowWithoutResponses(flow.GRRFlow):
"""This flow will launch a child flow."""
success = False
@flow.StateHandler(next_state="ParentReceiveHello")
def Start(self):
# Call the child flow.
self.CallFlow("ChildFlow",
send_replies=False,
next_state="ParentReceiveHello")
@flow.StateHandler()
def ParentReceiveHello(self, responses):
if responses:
raise RuntimeError("Messages are not expected to be passed to parent")
ParentFlowWithoutResponses.success = True
class BrokenParentFlow(flow.GRRFlow):
"""This flow will launch a broken child flow."""
# This is a global flag which will be set when the flow runs.
success = False
@flow.StateHandler(next_state="ReceiveHello")
def Start(self):
# Call the child flow.
self.CallFlow("BrokenChildFlow",
next_state="ReceiveHello")
@flow.StateHandler()
def ReceiveHello(self, responses):
if (responses or
responses.status.status == rdf_flows.GrrStatus.ReturnedStatus.OK):
raise RuntimeError("Error not propagated to parent")
BrokenParentFlow.success = True
class CallStateFlow(flow.GRRFlow):
"""A flow that calls one of its own states."""
# This is a global flag which will be set when the flow runs.
success = False
@flow.StateHandler(next_state="ReceiveHello")
def Start(self):
# Call the receive state.
self.CallState([rdfvalue.RDFString("Hello")],
next_state="ReceiveHello",
request_data={"test_req_data": 2})
@flow.StateHandler()
def ReceiveHello(self, responses):
if responses.First() != "Hello":
raise RuntimeError("Did not receive hello.")
if responses.request_data["test_req_data"] != 2:
raise RuntimeError("request_data did not propagate.")
CallStateFlow.success = True
class DelayedCallStateFlow(flow.GRRFlow):
"""A flow that calls one of its own states with a delay."""
# This is a global flag which will be set when the flow runs.
flow_ran = 0
@flow.StateHandler(next_state="ReceiveHello")
def Start(self):
# Call the child flow.
self.CallState([rdfvalue.RDFString("Hello")],
next_state="ReceiveHello")
@flow.StateHandler(next_state="DelayedHello")
def ReceiveHello(self, responses):
if responses.First() != "Hello":
raise RuntimeError("Did not receive hello.")
DelayedCallStateFlow.flow_ran = 1
# Call the child flow.
self.CallState([rdfvalue.RDFString("Hello")],
next_state="DelayedHello",
start_time=rdfvalue.RDFDatetime().Now() + 100)
@flow.StateHandler()
def DelayedHello(self, responses):
if responses.First() != "Hello":
raise RuntimeError("Did not receive hello.")
DelayedCallStateFlow.flow_ran = 2
class BadArgsFlow1Args(rdf_structs.RDFProtoStruct):
protobuf = tests_pb2.BadArgsFlow1Args
class BadArgsFlow1(flow.GRRFlow):
"""A flow that has args that mismatch type info."""
args_type = BadArgsFlow1Args
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
|
import sys
from Bio import Seq
from Bio import SeqIO
from Bio import SeqRecord
import pandas as pd
import numpy as np
import ms_module as ms
import re
############################
from Bio import Entrez
from Bio import SeqIO
from StringIO import StringIO
import time
from urllib2 import HTTPError # for Python 2
Entrez.email = "sergey.venev@umassmed.edu"
#
# # for this pair: there are peptides present in spec, but missing from pep - isn't it odd?
# # suggesting that different cutoffs were used for the Scaffold program to output pep and spec ...
# pep_fname = "../raw_data/New_files_to_analyze/original_input_before_dec25/peptides.xls"
# spec_fname = "../raw_data/New_files_to_analyze/original_input_before_dec25/specs.xls"
#
# pept from spec are all in pep and all Peptide sequences are in
pep_fname = "../raw_data/New_files_to_analyze/011216 glycocapture 90-90/peptides.xls"
spec_fname = "../raw_data/New_files_to_analyze/011216 glycocapture 90-90/specs.xls"
#
pep_info = pd.read_csv(pep_fname,sep='\t')
spec_info = pd.read_csv(spec_fname,sep='\t')
#
# fasta = SeqIO.to_dict(SeqIO.parse(fasta_fname,"fasta"),key_function=lambda _: _.id.split('|')[1])
# 1-BASED NOTATION FOR PROTEINS INDEXING ENFORCED ...
# pep_df = pd.read_csv(uniq_pept_fname)
# connection between peptide info and spectrum info to be established ...
spec_info['pept'] = spec_info['Peptide sequence'].str.upper()
#
parse_pname = lambda pn: pd.Series(ms.parse_prot_name(pn))
# add columns with the parsed information on the proteins ...
spec_info = spec_info.merge(spec_info['Protein name'].apply(parse_pname), left_index=True, right_index=True)
pep_info = pep_info.merge(pep_info['Protein name'].apply(parse_pname), left_index=True, right_index=True)
# SOME VALIDATIONS ...
spec_peps_in_peps = spec_info['pept'].isin(pep_info['Peptide sequence'])
if spec_peps_in_peps.all():
print "All peptides from spectrum file are present in the peptide summary file!"
print "Sound very logicall."
else:
print "There are some peptide from spectrum file that are not present in the peptide summary file:"
print spec_info[~spec_peps_in_peps]
print """That is a strange situation,
suggesting that different parameters were used in Scaffold
to generate peptide summary and spectrum file."""
print """We proceed dismissing thise fact,
and using all the gsite/peptides pairs present in spectrum,
thus assuming self-sufficiency of the spectrum file
and its prevalence over peptide summary.
In other words, there is nothing in the peptide summary file,
that cannot be deduced from the spectrum file.(? seem to be true, but is it general?)"""
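# Illustrative reverse check (an addition, not part of the original validation):
# the claim above - that the peptide summary adds nothing beyond the spectrum
# file - can be spot-checked in the opposite direction with the columns already
# used in this script.
peps_in_spec = pep_info['Peptide sequence'].str.upper().isin(spec_info['pept'])
if not peps_in_spec.all():
    print "Note: %d peptides from the peptide summary are absent from the spectrum file."%(~peps_in_spec).sum()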
##################################################################################################
pept_sum_prots = pep_info['Protein name'].unique()
spec_prots = spec_info['Protein name'].unique()
if pept_sum_prots.size == spec_prots.size:
print "Pept.summary and spectrum files are refferring to the same number of different protein names."
print "It is a good sign!"
else:
print "Pept.summary file is refferring to %d unique protein names."%pept_sum_prots.size
print "Spectrum file is refferring to %d unique protein names."%spec_prots.size
print "This is unexpected discrepancy: proceed using data stored in the spectrum file."
############################################
# columns that needs to be delivered ... #
############################################
# A gsites, 1 per line
# B pept, 1 per line
# B1 enzyme, G or T, derive from 'Biological sample category', like this: {'TrypsinSample1':'T','GluC_Sample2':'G'}
# C peptide_start, 1 per line accordingly
# D all_uids, REPLACE WITH col:H
# E prot_seq, try to get those from NCBI, not from UniProt ...
# F protein, ??? sequence, name or what???
# G uid_max, UID for major form instead or something like that ...
# H prot_name, parsed out human-readable name from 'Protein name'
# H1 gene_name, parsed out GN=xxx from 'Protein name'
# I uniq_peptide_count, discard that column ...
# J pept_probability, output a number, not the string - this would be the criterion
# K gsites_predicted, OK
# L gsites_predicted_number, OK
# M gsite_start, beware of 0 or 1 type of indexing ...
# N,O,P - gsites AAs in separate columns
# M1, NOP combined, gsite sequence basically!
# Q signal, from GeneBank record on the protein, simply Y,N on whether there is a 'Signal' in gb.
# R signal_location, location of the signal from Q
# S tm_span, Y,N just for the fact of having TM span as a protein feature.
#
#
#
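#
# Minimal sketch for column B1 only (kept commented out; the sample-category
# strings are assumptions taken from the example mapping above and may differ
# in the real data):
# enzyme_map = {'TrypsinSample1': 'T', 'GluC_Sample2': 'G'}
# spec_info['enzyme'] = spec_info['Biological sample category'].map(enzyme_map)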
# # this would be all possible fields that you can search in the protein database ...
# print "These are the fields one can search in db='protein':"
# handle = Entrez.einfo(db='protein')
# prot_db_fields = Entrez.read(handle)
# for idx,field in enumerate(prot_db_fields['DbInfo']['FieldList']):
# print idx+1, field['Name'], field['FullName'], field['Description']
# handle.close()
#
# GENERATE AN ARRAY OF ACCESS TERMS:
term_func = lambda (gn,os): "RecName[Title] AND \"%s\"[Gene Name] AND \"%s\"[Organism]"%(gn,os)
# GNs of some proteins evaluated to NaN, skip them ...
spec_gn_notnull = spec_info[spec_info['GN'].notnull()]
gn_os_for_terms = spec_gn_notnull[['GN','OS']].drop_duplicates().reset_index(drop=True)
terms_array = gn_os_for_terms.apply(term_func,axis=1)
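# For illustration (values taken from the commented-out esearch example below):
# a pair like GN='PPT1', OS='Homo sapiens' produces the term
#   RecName[Title] AND "PPT1"[Gene Name] AND "Homo sapiens"[Organism]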
#
def search_term(term):
# once all set, try actually searching something ...
handle = Entrez.esearch(db="protein", term=term)
# handle=Entrez.esearch(db="protein",term="RecName[TITL] AND PPT1[GENE] AND \"Homo sapiens\"[ORGN]")
record = Entrez.read(handle)
handle.close()
#
# fetch_idlist.append(record['IdList'])
#
print "Sending request for: %s ..."%term
print "Results:",record['IdList']
#
# TRY SETTING A DELAY OR SOMETHING, IN CASE NCBI WOULD START COMPLAINING ...
    # NCBI might not like overly frequent requests to its servers - that's why!
    time.sleep(500./1000.) # sleep for 500 milliseconds to make it easier on NCBI ...
return pd.Series(list(record['IdList']))
########################################
multicol_fetch_res = terms_array.apply(search_term)
# stacked - multiindex is in use, level=1 index is the column name from 'multicol_fetch_res' ...
unrolled_fetch_res = multicol_fetch_res.stack()
# index is no longer unique after the following operation ... we dropped the inner indexing part.
# IMPORTANT: indexes correspond to those from the original df (use it for merging later) ...
unrolled_origfetch = pd.DataFrame(unrolled_fetch_res.reset_index(level=1,drop=True),columns=['fetchid',])
# merge unrolled_origfetch (a single column with ambiguous index) with the original df ...
# 'unrolled_origfetch' must be a DataFrame to merge: Series are not mergeable for some reason ...
# the whole df is to be unrolled after the following operation.
unrolled_fetchdf = gn_os_for_terms.merge(unrolled_origfetch,left_index=True,right_index=True).reset_index(drop=True)
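# Illustration with made-up IDs: if the search for row 0 returned
# IdList ['11', '22'], stack() yields two rows that both keep original index 0,
# so after the merge the corresponding (GN, OS) pair appears once per candidate
# fetchid - which is exactly the "unrolling" described above.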
#
#
#
# """Simply looking into DSC2 and DSC3 case tell us that there have to be sophisticated algorithm to choose
# the best matching fetchid for each gene name. Simply taking the first answer wouldn't work the best."""
# TO BE CONTINUED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
spec_info_with_fetch = spec_info.merge(unrolled_fetchdf[['GN','fetchid']],how='outer', on='GN').reset_index(drop=True)
# #########################
# # LOOKS LIKE UID IS A GREAT CANDIDATE TO DISCERN
# # BETWEEN RIGHT AND WRONG SEQUENCES RETURNED BY NCBI FOR A GENENAME REQUEST ...
# #################################
#############################
# # # TOUGH CHOISE ON WHICH PROTEIN TO FINALLY ASSIGN TO THE GN ...
# yet to be made ...
######################################
# OUTPUT THE UPDATED SPEC_INFO (WITH FETCHIDS) TO BE USED ON THE NEXT STAGE ...
print
print "FetchIDs are ready."
print "Storing updated spectrum file to include FetchIDs for every GeneName ..."
spec_info_with_fetch_fname = 'spec_info_with_fetch.csv'
spec_info_with_fetch.to_csv(spec_info_with_fetch_fname,index=False)
print "file is stored as %s"%spec_info_with_fetch_fname
print
#
#
print
print "Posting and fetching genebank records corresponding to the available FetchIDs from the Protein DB ..."
pulled_gb_recs_fname = "pulled_proteins.gb"
batch_size = 60
attempts_limit = 3
# THEN WE'D NEED TO DO POST AND ONLY AFTER EFETCH ...
search_results = Entrez.read( Entrez.epost("protein", id=",".join( unrolled_fetchdf['fetchid'].unique() )) )
webenv = search_results["WebEnv"]
query_key = search_results["QueryKey"]
# download results in batches using history and cookies ...
count = unrolled_fetchdf.shape[0]
out_handle = open(pulled_gb_recs_fname, "w")
for start in range(0, count, batch_size):
end = min(count, start+batch_size)
print("Going to download record %i to %i" % (start+1, end))
attempt = 0
while attempt < attempts_limit:
attempt += 1
try:
fetch_handle = Entrez.efetch(db="protein", rettype="gb", retmode="text",
retstart=start, retmax=batch_size,
webenv=webenv, query_key=query_key)
            break # skip subsequent attempts if succeeded ...
except HTTPError as err:
if 500 <= err.code <= 599:
print("Received error from server %s" % err)
print("Attempt %d of %d"%(attempt,attempts_limit))
# attempt += 1
time.sleep(15)
else:
print "oh Shut! %d"%attempt
raise
data = fetch_handle.read()
fetch_handle.close()
out_handle.write(data)
out_handle.close()
#
print "Fetched genebank records are stored in %s."%pulled_gb_recs_fname
print "Check for BoPython gb consistency before processing ..."
print "THE END"
# #
# ############################
# results = []
# for idx in record['IdList']:
# print "fetching",idx
# handle = Entrez.efetch(db='protein',id=idx,rettype='gb',retmode='text')
# filelike = StringIO(handle.read())
# seqrec = SeqIO.read(filelike,format='gb')
# results.append( seqrec )
#########################################################################################
# # The features that Reid is asking about are accessible through:
# r0 = results[0]
# f4 = r0.features[4]
# f4.qualifiers['region_name']
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 25 13:50:30 2017
@author: Owner
"""
from __future__ import print_function
import os
import numpy as np
np.random.seed(1337)
import matplotlib.pyplot as plt
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
import sys
#from sklearn.cross_validation import train_test_split
BASE_DIR = os.path.dirname( __file__ )
GLOVE_DIR = BASE_DIR + '/glove.6B/'
TEXT_DATA_DIR = BASE_DIR + '/20_newsgroup/'
MAX_SEQUENCE_LENGTH = 1000
MAX_NB_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
# first, build index mapping words in the embeddings set
# to their embedding vector
print('Indexing word vectors.')
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
#load from csv
outputfolder=os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir, 'output'))
floc=os.path.abspath(os.path.join(outputfolder,'topconvstats.csv'))
topconvdfcopy=pd.read_csv(floc, sep='\t', encoding='utf-8',index_col=False)
if hasattr(topconvdfcopy, u'Unnamed: 0'): del topconvdfcopy['Unnamed: 0']#might be hiding poorly merged columns from earlier attempts
if hasattr(topconvdfcopy, u'Unnamed: 0.1'): del topconvdfcopy['Unnamed: 0.1']#might be hiding poorly merged columns from earlier attempts
if hasattr(topconvdfcopy, 'convid'): topconvdfcopy['convid']=topconvdfcopy['convid'].astype('unicode')#loading auto changes this to int
print ('Loaded file from ' + floc)
def expandtag(df,tagtype): #need to double check to see if truly duplicating properly--------------------------------------------------------
#use nested expandtag(expandtag(df,tagtype),tagtype) for both issue and school
if tagtype=='issue':
emptyrow=df[df['numissues']==0]#collect rows with issues equal to 0
filledrow=df[df['numissues']>0]#collect rows with issues greater than 1
elif tagtype=='school':
emptyrow=df[df['school']=='None']#collect rows with schools with none
filledrow=df[df['school']!='None']#collect rows with schools
#Build new df
newdf=[]
for index, row in filledrow.iterrows():
if type(row[tagtype])==unicode:
row[tagtype]=row[tagtype][1:-1].split(', ')
for multitag in row[tagtype]:
temprow=row.copy()#duplicate row
temprow[tagtype]=multitag#replace multi issue of duplicated row with single issue
newdf.append(temprow)
filledrow=pd.DataFrame(newdf)
expandeddf=emptyrow.append(filledrow) #recombine
expandeddf.sort_index(inplace=True) #sort
return expandeddf
dataset=expandtag(topconvdfcopy,'issue').copy()
dataset=dataset.reset_index()
dataset=dataset[['issue','firstmessage']]
#remove those with no messages
dataset=dataset[~(dataset.firstmessage=='None')]
#remove those with no tags
dataset=dataset[~(dataset.issue=='None')]
#print info of dataset
dataset.groupby('issue').describe()
dataset_length= dataset['issue'].map(lambda text: len(text))
#dataset_length.plot(bins=20, kind='hist')
dataset_length.describe()
dataset_distribution=dataset.groupby('issue').count().sort_values('firstmessage',ascending=False)
#data is too poorly conditioned and biased, use only top 6 and the rest put as Unknown <----- doesn't really improve results :(
issuetoclassify=['Login Help','Forward to School','Check In/Out','Admin','Portfolio','LFR','Unknown']
#issuetoclassify=['Login Help','Unknown']
def modissue(s,issuelist):
if s not in issuelist:
s='Unknown'
return s
dataset['issue']=dataset.issue.apply(lambda s: modissue(s,issuetoclassify))
#dataset.groupby('label').count()
issuename=issuetoclassify
issuename.sort()
#prep for keras
#prepare dictionary mapping for label name to numeric id
'''
issuename = []
with open(os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir,'issuelist.txt'))) as inputfile:
for line in inputfile:
issuename.append(line.strip())
'''
texts = [] # list of text samples
labels_index = pd.Series(sorted(issuename)).to_dict() # dictionary mapping label name to numeric id
index_labels={v: k for k, v in labels_index.iteritems()}
labels = [] # list of label ids
texts=dataset['firstmessage'].tolist()
texts = [s.encode('ascii', 'ignore') for s in texts]
labels=dataset['issue'].tolist()
labels = [index_labels[s] for s in labels]
'''
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path, fname)
if sys.version_info < (3,):
f = open(fpath)
else:
f = open(fpath, encoding='latin-1')
texts.append(f.read())
f.close()
labels.append(label_id)
'''
print('Found %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = data[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index))
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
for word, i in word_index.items():
if i > MAX_NB_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(nb_words + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
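# (index 0 is reserved for the padding value produced by pad_sequences, and
# Tokenizer assigns word indices starting at 1 - hence the "+ 1" row above)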
print('Training model.')
# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(256, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(256, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(256, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
preds = Dense(len(labels_index), activation='softmax')(x)
'''
from keras.models import Sequential
from keras.layers import Dropout, Activation
nb_classes=len(issuename)+1
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
model = Sequential()
model.add(embedded_sequences)
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
'''
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
# Train model
model.fit(x_train, y_train, validation_data=(x_val, y_val),
nb_epoch=20, batch_size=128)
# Evaluate model
score, acc = model.evaluate(x_val, y_val, batch_size=128)
test=model.predict(x_val, batch_size=32, verbose=0)
print('Score: %1.4f' % score)
print('Accuracy: %1.4f' % acc)
def getmaxinnestedlist(s):
outlist=[]
for l in s:
maxval=max(l)
for idx,val in enumerate(l):
if val==maxval:
outlist.append(idx)
return outlist
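# Note: for a 2-D array this is roughly np.argmax(s, axis=1), except that ties
# on the row maximum would append more than one index per row here.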
testoutput_class=[issuename[i] for i in getmaxinnestedlist(test)]
testinput_class=[issuename[i] for i in getmaxinnestedlist(y_val)]
testdf=pd.DataFrame([testoutput_class,testinput_class],index=['output','input']).transpose()
testdf['result']=testdf.output==testdf.input
#word embedding layer
|
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from functools import reduce
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
return list(map(self.lambda_func,self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
return reduce(self.lambda_func,self.forward_prepare(input))
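# The wrappers above mirror the Torch7 containers noted in the comments below:
# LambdaMap with several children runs each child on the same input
# (ConcatTable), and a child-less LambdaReduce folds the resulting list
# (CAddTable). Illustrative shape of one residual branch (names 'branch' and
# 'shortcut' are placeholders for the inline sub-Sequentials):
#
#   nn.Sequential(
#       LambdaMap(lambda x: x, branch, shortcut),  # both see the same input
#       LambdaReduce(lambda a, b: a + b),          # element-wise sum
#       nn.ReLU(),
#   )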
resnext_101_64x4d = nn.Sequential( # Sequential,
nn.Conv2d(3,64,(7, 7),(2, 2),(3, 3),1,1,bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d((3, 3),(2, 2),(1, 1)),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
nn.Sequential( # Sequential,
nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(2, 2),(1, 1),1,64,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
nn.Sequential( # Sequential,
nn.Conv2d(256,512,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(2, 2),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
nn.Sequential( # Sequential,
nn.Conv2d(512,1024,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048,2048,(3, 3),(2, 2),(1, 1),1,64,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
nn.Sequential( # Sequential,
nn.Conv2d(1024,2048,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048,2048,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048,2048,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.AvgPool2d((7, 7),(1, 1)),
Lambda(lambda x: x.view(x.size(0),-1)), # View,
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(2048,1000)), # Linear,
)
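# Illustrative sketch: the converted graph above uses the Lambda/LambdaMap/
# LambdaReduce shims to emulate Torch's ConcatTable/CAddTable containers. One
# of the 1024-channel residual blocks is roughly equivalent to the following
# plain nn.Module; the class name and defaults here are hypothetical, not part
# of the converter output.
import torch
import torch.nn as nn

class BottleneckSketch(nn.Module):
    """Rough PyTorch-native equivalent of one ConcatTable/CAddTable block."""
    def __init__(self, channels=1024, groups=64):
        super(BottleneckSketch, self).__init__()
        self.branch = nn.Sequential(
            nn.Conv2d(channels, channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(),
            nn.Conv2d(channels, channels, 3, 1, 1, groups=groups, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(),
            nn.Conv2d(channels, channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(channels),
        )
        self.relu = nn.ReLU()

    def forward(self, x):
        # LambdaMap(identity, branch) followed by LambdaReduce(add) and ReLU.
        return self.relu(self.branch(x) + x)

# e.g. BottleneckSketch()(torch.randn(1, 1024, 14, 14)) keeps shape (1, 1024, 14, 14).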
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Audio generation example.
This script trains AO AR models on audio datasets.
"""
import copy
import functools
import time
from absl import logging
from clu import metric_writers
from clu import periodic_actions
import flax
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import optax
from autoregressive_diffusion.experiments.audio import input_pipeline_sc09
from autoregressive_diffusion.experiments.audio import utils as train_utils
from autoregressive_diffusion.experiments.audio.arch import diff_wave
from autoregressive_diffusion.experiments.audio.model import arm
from autoregressive_diffusion.experiments.images import checkpoint
from autoregressive_diffusion.experiments.language import language_train_state
from autoregressive_diffusion.model.autoregressive_diffusion import ao_arm
from autoregressive_diffusion.model.autoregressive_diffusion import bit_ao
from autoregressive_diffusion.utils import util_fns
def train_step(rng, batch, state, model, config, learning_rate_fn):
"""Train for a single step."""
rng_return, rng = jax.random.split(rng)
rng = jax.random.fold_in(rng, jax.lax.axis_index('batch'))
def loss_fn(params):
# Outputs are: (elbo_per_t, ce_value, t) or (acc, None, None).
elbo_value, *extra = model.elbo(rng, params, batch['inputs'], train=True)
elbo_value = jnp.mean(elbo_value, axis=0)
loss = -elbo_value
if config.model != 'arm':
elbo_per_t, ce_value, t = extra
if config.ce_term > 0:
ce_value = extra[1]
ce_value = jnp.mean(ce_value, axis=0)
loss -= config.ce_term * ce_value
outputs = {'nelbo': -elbo_value,
'nelbo_per_t_batch': elbo_per_t,
't_batch': t,
'ce': -ce_value}
else:
acc = extra[0]
outputs = {'nelbo': -elbo_value, 'acc': acc}
return loss, outputs
lr = learning_rate_fn(state.step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, aux), grads = grad_fn(state.params)
grads = jax.lax.pmean(grads, axis_name='batch')
if config.clip_grad > 0:
grads, grad_norm = util_fns.clip_by_global_norm(
grads, clip_norm=config.clip_grad)
else:
grad_norm = util_fns.global_norm(grads)
state = state.apply_gradients(
grads=grads,
lr=lr,
ema_momentum=config.ema_momentum)
metrics = {
'lr': lr,
'grad_norm': grad_norm}
for name, value in aux.items():
if 'batch' in name:
metrics[name] = jax.lax.all_gather(value, axis_name='batch')
else:
metrics[name] = jax.lax.pmean(value, axis_name='batch')
metrics['loss'] = jax.lax.pmean(loss, axis_name='batch')
return state, metrics, rng_return
def eval_step(rng, batch, state, model):
"""Eval a single step."""
rng_return, rng = jax.random.split(rng)
rng = jax.random.fold_in(rng, jax.lax.axis_index('batch'))
elbo_value, *extra = model.elbo(
rng, state.ema_params, batch['inputs'], train=False)
outputs = {'nelbo': -elbo_value}
if model.config.model != 'arm':
_, ce_value, _ = extra
outputs['ce'] = -ce_value
else:
acc, _, _ = extra
outputs['acc'] = acc
# Carefully account for batch masking.
batch_mask = batch.get(
'mask',
jnp.ones(batch['inputs'].shape[:1], dtype=jnp.bool_))
outputs = {k: jnp.sum(util_fns.apply_weight(v, batch_mask))
for k, v in outputs.items()}
outputs, denom = jax.lax.psum(
(outputs, jnp.sum(batch_mask)), axis_name='batch')
return outputs, denom, rng_return
def eval_model(
p_eval_step,
rng,
state,
it,
num_steps):
"""Eval for a number of steps."""
start_time = time.time()
batch_metrics, batch_denom = [], []
for step in range(num_steps):
with jax.profiler.StepTraceAnnotation('eval', step_num=step):
metrics, denom, rng = p_eval_step(rng, next(it), state)
# Better to leave metrics on device, and off-load after finishing epoch.
batch_metrics.append(metrics)
batch_denom.append(denom)
# Load to CPU.
batch_metrics, batch_denom = jax.device_get(
flax.jax_utils.unreplicate((batch_metrics, batch_denom)))
# Compute mean of metrics across each batch in epoch.
denom_np = np.sum(batch_denom)
metrics_np = {k: np.sum([metrics[k] for metrics in batch_metrics]) / denom_np
for k in batch_metrics[0] if 'batch' not in k}
logging.info('Eval took: %.3f seconds', time.time() - start_time)
return metrics_np, rng
def log_standard_metrics(
writer, step, *, train_metrics=None, eval_metrics=None):
"""Logs metrics using a metrics writer."""
metrics_dict = {}
if train_metrics:
metrics_dict.update(
{'train_' + k: v for k, v in train_metrics.items() if 'batch' not in k})
if eval_metrics:
metrics_dict.update(
{'eval_' + k: v for k, v in eval_metrics.items() if 'batch' not in k})
writer.write_scalars(step, metrics_dict)
def model_setup(init_rng, config):
"""Sets up the model and initializes params."""
def get_architecture(
num_input_classes, n_output_channels, num_steps, is_causal=False):
cfg = copy.deepcopy(config.arch.config)
cfg.max_time = num_steps
cfg.num_classes = num_input_classes
if config.arch.name == 'diff_wave':
cfg.output_features = n_output_channels
cfg.is_causal = is_causal
net = diff_wave.DiffWave(**cfg)
else:
raise ValueError(f'Unknown architecture requested: {config.arch.name}.')
return net
if config.model == 'ao_arm':
model = ao_arm.ArbitraryOrderARM.create(
config, get_architecture, absorbing_state=config.num_classes // 2)
elif config.model == 'bit_ao':
model = bit_ao.BitUpscaleAutoregressiveDiffusion.create(
config, get_architecture)
elif config.model == 'arm':
model = arm.ARM.create(config, get_architecture)
else:
raise ValueError(f'Unknown model {config.model}.')
tmp_x, tmp_t = (jnp.ones([1, *config.data_shape], dtype=jnp.int32),
jnp.ones([1]))
@functools.partial(jax.jit, backend='cpu')
def init():
return model.init_architecture(init_rng, tmp_x, tmp_t)
logging.info('Initializing neural network.')
variables = init()
return model, variables
def train_and_evaluate(
config,
work_dir,
try_checkpoint=True):
"""Execute model training and evaluation loop.
Args:
config: Hyperparameter configuration for training and evaluation.
work_dir: Directory where the tensorboard summaries are written to.
try_checkpoint: Whether to try to load a checkpoint (usually enabled;
  disabling it can be practical for debugging).
Returns:
The train state (which includes the `.params`).
"""
# Init rng key.
rng = jax.random.PRNGKey(config.seed)
data_rng, rng = jax.random.split(rng)
is_first_host = jax.process_index() == 0
if config.dataset.name.endswith('speech_commands09'):
ds, ds_metadata = input_pipeline_sc09.get_dataset(data_rng, config)
else:
raise ValueError(f'Unknown dataset {config.dataset.name}.')
# Immediately create infinite iterators.
it = jax.tree_map(util_fns.get_iterator, ds)
# TODO(agritsenko): Can we fix the ugly nested dicts?
config.data_shape = ds_metadata['train']['shape']['inputs'][2:]
config.num_classes = ds_metadata['train']['num_classes']
config.sample_rate = ds_metadata['train']['sample_rate']
writer = metric_writers.create_default_writer(
work_dir, just_logging=jax.process_index() > 0)
rng, init_rng = jax.random.split(rng)
model, variables = model_setup(init_rng, config)
# From now on we want different rng across hosts:
rng = jax.random.fold_in(rng, jax.process_index())
def tx_fn(lr):
return optax.adamw(
lr, b1=0.9, b2=config.beta2, eps=1e-08, eps_root=0.0,
weight_decay=config.weight_decay)
state = language_train_state.TrainState.create(
params=variables['params'], tx_fn=tx_fn)
start_step = None
if try_checkpoint:
state, start_step = checkpoint.restore_from_path(work_dir, state)
start_step = start_step or 0
# Use different rngs for train & eval.
rng_train, rng_eval, rng_sample = jax.random.split(rng, 3)
kl_tracker = util_fns.KLTracker(num_steps=model.num_steps)
kl_history = []
learning_rate_fn = train_utils.create_learning_rate_scheduler(
**config.learning_rate)
p_train_step = jax.pmap(
functools.partial(
train_step,
config=config,
learning_rate_fn=learning_rate_fn,
model=model),
axis_name='batch',
in_axes=(None, 0, 0),
out_axes=(0, 0, None),
donate_argnums=(2,))
# The only axes that are broadcasted are the in- and output rng key ones. The
# rng is the first arg, and the last return value.
p_eval_step = jax.pmap(
functools.partial(
eval_step,
model=model),
axis_name='batch',
in_axes=(None, 0, 0),
out_axes=(0, 0, None))
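# Illustration of the broadcast noted above: with in_axes=(None, 0, 0) a call
#   metrics, denom, rng_eval = p_eval_step(rng_eval, batch, state)
# broadcasts the single rng key to every device while `batch` and `state` are
# split along their leading (device) axis; out_axes=(0, 0, None) stacks the
# per-device outputs and hands back one shared rng key.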
# Training length.
logging.info('Training will start from step %d', start_step)
# Replicate state.
state = flax.jax_utils.replicate(state)
# Setup hooks.
hooks = []
report_progress = periodic_actions.ReportProgress(
num_train_steps=config.num_train_steps, writer=writer)
if is_first_host:
hooks += [
report_progress,
periodic_actions.Profile(logdir=work_dir, num_profile_steps=5)
]
with metric_writers.ensure_flushes(writer):
batch_metrics = []
for step in range(start_step, config.num_train_steps):
logging.log_first_n(logging.INFO, f'Train step: {step}', 5)
with jax.profiler.StepTraceAnnotation('train', step_num=step):
state, metrics, rng_train = p_train_step(
rng_train,
next(it['train']),
state)
batch_metrics.append(metrics)
# Cycle though hooks.
for h in hooks:
h(step)
is_last_step = step == config.num_train_steps - 1
if (step % config.log_every_steps == 0) or is_last_step:
with report_progress.timed('training_metrics'):
################### Process batch metrics ############################
batch_metrics = jax.device_get(
flax.jax_utils.unreplicate(batch_metrics))
if 't_batch' in metrics:
# TODO(agritsenko): Factor out into a separate function.
# This processes the loss per t. Although it uses two nested for-loops
# (counting the one inside kl_tracker), it does not hurt timing
# performance meaningfully.
batch_t = [
metrics['t_batch'].reshape(-1) for metrics in batch_metrics]
batch_nelbo_per_t = [
metrics['nelbo_per_t_batch'].reshape(-1)
for metrics in batch_metrics]
for t, nelbo_per_t in zip(batch_t, batch_nelbo_per_t):
kl_tracker.update(t, nelbo_per_t)
################### Process batch metrics ############################
metrics = {key: np.mean([metrics[key] for metrics in batch_metrics])
for key in batch_metrics[0] if 'batch' not in key}
# Metric logging.
if is_first_host:
log_standard_metrics(writer, step, train_metrics=metrics)
batch_metrics = []
if config.eval_every_steps and (
(step % config.eval_every_steps == 0) or is_last_step):
with report_progress.timed('eval'):
####################### Run evaluation ###############################
metrics, rng_eval = eval_model(
p_eval_step,
rng_eval,
state,
it['eval'],
(ds_metadata['eval']['num_batches'] *
config.get('num_eval_passes', 1)))
# Metric logging.
if is_first_host:
log_standard_metrics(writer, step, eval_metrics=metrics)
# Track KL (unrelated to the eval, but nice to not do every step).
kl_values = kl_tracker.get_kl_per_t()
kl_history.append(np.array(kl_values))
kl_history = kl_history[-50:]
if config.sample_every_steps and (
(step % config.sample_every_steps == 0) or is_last_step):
with report_progress.timed('sample'):
######################### Run sampling ###############################
chain = model.sample(
jax.random.fold_in(rng_sample, step),
state.ema_params,
config.sample_batch_size,
chain_out_size=config.get('chain_out_size', model.num_stages))
if is_first_host:
chain = jax.device_get(chain)
long_sample = np.reshape(chain[-1], (1, -1, 1)).astype(np.float32)
long_sample = (2. * long_sample) / config.num_classes - 1.
writer.write_audios(
step, {'samples': long_sample}, sample_rate=config.sample_rate)
######################### Checkpointing #################################
if is_first_host and config.checkpoint_every_steps and (
(step % config.checkpoint_every_steps == 0) or is_last_step):
logging.info('Saving checkpoint: step %d', step)
with report_progress.timed('checkpoint'):
checkpoint.save_checkpoint(
work_dir, state=flax.jax_utils.unreplicate(state), step=step)
logging.info('Finished saving checkpoint: step %d', step)
return state
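# Illustrative sketch (hypothetical values): train_and_evaluate() expects an
# ml_collections.ConfigDict. The field names below are the ones read by the
# code above; the values are placeholders, and further fields (arch,
# learning_rate, optimizer, sampling and dataset settings) are also required.
#   config = ml_collections.ConfigDict()
#   config.seed = 0
#   config.model = 'ao_arm'   # or 'bit_ao' / 'arm'
#   config.dataset = ml_collections.ConfigDict({'name': 'speech_commands09'})
#   config.num_train_steps = 100000
#   config.log_every_steps = 100
#   config.eval_every_steps = 1000
#   config.sample_every_steps = 5000
#   config.checkpoint_every_steps = 1000
#   state = train_and_evaluate(config, work_dir='/tmp/audio_run')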
def monitor_and_sample(config, work_dir):
"""Monitors `work_dir` for new checkpoints and run sampling on them.
Args:
config: Hyperparameter configuration for training and evaluation.
work_dir: Directory where the tensorboard summaries are written to.
"""
# Init rng key.
rng = jax.random.PRNGKey(config.seed)
data_rng, rng = jax.random.split(rng)
is_first_host = jax.process_index() == 0
# TODO(agritsenko): We are loading the datasets just to get the metadata.
# Can we be smarter about this?
if config.dataset.name.endswith('speech_commands09'):
_, ds_metadata = input_pipeline_sc09.get_dataset(data_rng, config)
else:
raise ValueError(f'Unknown dataset {config.dataset.name}.')
# TODO(agritsenko): Can we fix the ugly nested dicts?
config.data_shape = ds_metadata['train']['shape']['inputs'][2:]
config.num_classes = ds_metadata['train']['num_classes']
config.sample_rate = ds_metadata['train']['sample_rate']
writer = metric_writers.create_default_writer(
work_dir, just_logging=jax.process_index() > 0)
rng, init_rng = jax.random.split(rng)
model, variables = model_setup(init_rng, config)
# From now on we want different rng across hosts:
rng = jax.random.fold_in(rng, jax.process_index())
rng, rng_sample = jax.random.split(rng)
def tx_fn(lr):
return optax.adamw(
lr, b1=0.9, b2=config.beta2, eps=1e-08, eps_root=0.0,
weight_decay=config.weight_decay)
state = language_train_state.TrainState.create(
params=variables['params'], tx_fn=tx_fn)
# Wait for checkpoints in a loop.
ckpt_path_iterator = checkpoint.checkpoints_iterator(work_dir, target=None)
with metric_writers.ensure_flushes(writer):
for _ in ckpt_path_iterator:
state, step = checkpoint.restore_from_path(work_dir, state)
is_last_step = step == config.num_train_steps - 1
logging.info('Loaded checkpoint for step: %d', step)
# Replicate the state
state = flax.jax_utils.replicate(state)
######################### Run sampling ###############################
chain = model.sample(
jax.random.fold_in(rng_sample, step),
state.ema_params,
config.sample_batch_size,
chain_out_size=config.get('chain_out_size', model.num_stages))
if is_first_host:
chain = jax.device_get(chain)
long_sample = np.reshape(chain[-1], (1, -1, 1)).astype(np.float32)
long_sample = (2. * long_sample) / config.num_classes - 1.
long_sample = long_sample.astype(np.float32)
writer.write_audios(
step, {'samples': long_sample}, sample_rate=config.sample_rate)
if is_last_step:
break
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import auto_control_deps as acd
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import momentum
class AutomaticControlDependenciesTest(test.TestCase):
def testBasic(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
with acd.AutomaticControlDependencies() as c:
v.assign(v + 1)
v.assign(2 * v)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(), 4.0)
def testCondMustRun(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 6.0)
def testCondMustRunSeparateRead(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
one = constant_op.constant(1.0)
one = c.mark_as_return(one)
one.eval(feed_dict={p: False})
self.assertAllEqual(v.read_value().eval(), 5.0)
one.eval(feed_dict={p: True})
self.assertAllEqual(v.read_value().eval(), 6.0)
def testCondNested(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
q = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1, name='true')
return 1.0
def false_fn():
def inner_true_fn():
v.assign(v * 2, name='false_true')
return 2.0
def inner_false_fn():
v.assign(v * 3, name='false_false')
return 3.0
control_flow_ops.cond(q, inner_true_fn, inner_false_fn)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
with ops.name_scope('final'):
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0)
self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0)
def testCondOneBranch(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 5.0)
def testCondOneBranchUpdateBefore(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
v.assign(v * 2)
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 12.0)
def testCondOneBranchUpdateAfter(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
v.assign(v * 2)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 10.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 20.0)
def testDefunWhileLoopWithCapturedLoopVars(self):
n = 3
x = constant_op.constant(list(range(n)))
@function.defun
def loop():
c = lambda i, x: i < n
b = lambda i, x: (i + 1, x + 1)
i, out = control_flow_ops.while_loop(c, b, (0, x))
return i, out
i, out = loop()
self.assertEqual(int(i), 3)
self.assertAllEqual(out, [3, 4, 5])
def testDecorator(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
@acd.automatic_control_dependencies
def f():
v.assign(v + 1)
v.assign(2 * v)
return v.read_value()
self.assertAllEqual(f().eval(), 4.0)
def testOptimizerInDefun(self):
def loss(v):
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
value = train()
self.assertEqual(value.numpy(), -1.0)
def testReturningNonTensorRaisesError(self):
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
optimizer.apply_gradients = function.defun(optimizer.apply_gradients)
v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(lambda v: v**2)(v)
with self.assertRaisesRegexp(TypeError,
'.*must return zero or more Tensors.*'):
# TODO(akshayka): We might want to allow defun-ing Python functions
# that return operations (and just execute the op instead of running it).
optimizer.apply_gradients(grad)
# TODO(b/111663004): This should work when the outer context is graph
# building.
def testOptimizerNonSlotVarsInDefunNoError(self):
def loss(v):
return v**2
optimizer = adam.AdamOptimizer(learning_rate=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
train()
def testOptimizerInDefunWithCapturedVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
def loss():
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
grad = backprop.implicit_grad(loss)()
optimizer.apply_gradients(grad)
train()
self.assertEqual(v.numpy(), -1.0)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
|
# Copyright (c) 2007, Columbia Center For New Media Teaching And Learning (CCNMTL)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the CCNMTL nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY CCNMTL ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/python
"""
REST client convenience library
This module contains everything that's needed for a nice, simple REST client.
the main function it provides is rest_invoke(), which will make an HTTP
request to a REST server. it allows for all kinds of nice things like:
* alternative verbs: POST, PUT, DELETE, etc.
* parameters
* file uploads (multipart/form-data)
* proper unicode handling
* Accept: headers
* ability to specify other headers
this library is mostly a wrapper around the standard urllib and
httplib2 functionality, but also includes file upload support via a
python cookbook recipe
(http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306) and
has had additional work to make sure high unicode characters in the
parameters or headers don't cause any UnicodeEncodeError problems.
Joe Gregorio's httplib2 library is required. It can be easy_installed, or downloaded
nose is required to run the unit tests.
CHANGESET:
* 2009-10-08 - Anders - merged Taras Mankovski's patch for adding body to POST/PUT and test improvements
* 2009-10-08 - Anders - merged Peter Fein's patch for specifying mimetypes on file uploads
* 2007-06-13 - Anders - added experimental, partial support for HTTPCallback
* 2007-03-28 - Anders - merged Christopher Hesse's patches for fix_params and to eliminate
mutable default args
* 2007-03-14 - Anders - quieted BaseHTTPServer in the test suite
* 2007-03-06 - Anders - merged Christopher Hesse's bugfix and self-contained test suite
* 2006-12-01 - Anders - switched to httplib2. Improved handling of parameters and made it
stricter about unicode in headers (only ASCII is allowed). Added
resp option. More docstrings.
* 2006-03-23 - Anders - working around cherrypy bug properly now by being more
careful about sending the right
* 2006-03-17 - Anders - fixed my broken refactoring :) also added async support
and we now use post_multipart for everything since it works
around a cherrypy bug.
* 2006-03-10 - Anders - refactored and added GET, POST, PUT, and DELETE
convenience functions
* 2006-02-22 - Anders - handles ints in params + headers correctly now
"""
import urllib2,urllib, mimetypes, types, thread, httplib2
__version__ = "0.9.10"
def post_multipart(host, selector, method,fields, files, headers=None,return_resp=False, body=None):
"""
Post fields and files to an http host as multipart/form-data.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return the server's response page.
"""
if headers is None: headers = {}
content_type, body = encode_multipart_formdata(fields, files, body)
h = httplib2.Http()
headers['Content-Length'] = str(len(body))
headers['Content-Type'] = content_type
resp, content = h.request("http://%s%s" % (host,selector),method,body,headers)
if return_resp:
return resp, content
else:
return content
def encode_multipart_formdata(fields, files, body):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
# add body to the beginning
if body:
L.append(body)
L.append('')
L.append(str(body))
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(str(value))
for (key, filename, value, mimetype) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
if mimetype is None:
content_type=get_content_type(filename)
else:
content_type=mimetype
L.append('Content-Type: %s' % content_type)
L.append('')
L.append(str(value))
L.append('--' + BOUNDARY + '--')
L.append('')
L = [str(l) for l in L]
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def GET(url,params=None,files=None,accept=[],headers=None,async=False,resp=False):
""" make an HTTP GET request.
performs a GET request to the specified URL and returns the body of the response.
in addition, parameters and headers can be specified (as dicts). a list of mimetypes
to accept may be specified.
if async=True is passed in, it will perform the request in a new thread
and immediately return nothing.
if resp=True is passed in, it will return a tuple of an httplib2 response object
and the content instead of just the content.
"""
return rest_invoke(url=url,method=u"GET",params=params,files=files,accept=accept,headers=headers,async=async,resp=resp)
def POST(url,**kwargs):
""" make an HTTP POST request.
url - string to make post to
params - string to include with post ( optional, params and body are mutually exclusive )
body - string to include with post ( optional, params and body are mutually exclusive )
accept - list of mimetypes to accept
files - files to upload may be specified in dict format {'file' : file object, 'filename' : filename}
headers - dict of headers to include
async - boolean set to False to wait for response and have it return body of the response
resp - boolean True will return a tuple of a httplib2 response object and the content instead of just content
"""
kwargs['method'] = u'POST'
return rest_invoke(url=url,**kwargs)
def PUT(url,**kwargs):
""" make an HTTP PUT request.
url - string to make post to
params - string to include with post ( optional, params and body are mutually exclusive )
body - string to include with post ( optional, params and body are mutually exclusive )
accept - list of mimetypes to accept
files - files to upload may be specified in dict format {'file' : file object, 'filename' : filename}
headers - dict of headers to include
async - boolean set to False to wait for response and have it return body of the response
resp - boolean True will return a tuple of a httplib2 response object and the content instead of just content
"""
kwargs['method'] = u'PUT'
return rest_invoke(url=url,**kwargs)
def DELETE(url,**kwargs):
""" make an HTTP DELETE request.
url - string to make post to
params - string to include with post ( optional, params and body are mutually exclusive )
body - string to include with post ( optional, params and body are mutually exclusive )
accept - list of mimetypes to accept
headers - dict of headers to include
async - boolean set to False to wait for response and have it return body of the response
resp - boolean True will return a tuple of a httplib2 response object and the content instead of just content
by default DELETE() waits for the response and returns the body of the response.
To perform the request in a new thread and return nothing immediately, specify async=True.
if resp=True is passed in, it will return a tuple of an httplib2 response object
and the content instead of just the content.
"""
kwargs['method'] = u'DELETE'
return rest_invoke(url=url,**kwargs)
def rest_invoke(url, **kwargs):
""" make an HTTP request with all the trimmings.
rest_invoke() will make an HTTP request and can handle all the
advanced things that are necessary for a proper REST client to handle:
* alternative verbs: POST, PUT, DELETE, etc.
* parameters
* file uploads (multipart/form-data)
* proper unicode handling
* Accept: headers
* ability to specify other headers
rest_invoke() returns the body of the response that it gets from
the server.
rest_invoke() does not try to do any fancy error handling. if the
server is down or gives an error, it will propagate up to the
caller.
this function expects to receive unicode strings. passing in byte
strings risks double encoding.
parameters:
url: the full url you are making the request to
method: HTTP verb to use. defaults to GET
params: dictionary of params to include in the request
files: dictionary of files to upload. the structure is
param : {'file' : file object, 'filename' : filename, 'mimetype': mimetype (optional)}
accept: list of mimetypes to accept in order of preference. defaults to '*/*'
headers: dictionary of additional headers to send to the server
async: Boolean. if true, does request in new thread and nothing is returned
resp: Boolean. if true, returns a tuple of response,content. otherwise returns just content
httpcallback: None. an HTTPCallback object (see http://microapps.org/HTTP_Callback). If specified, it will
override the other params.
"""
if kwargs.get('async',False):
method = kwargs.get('method', u'GET')
params = kwargs.get('params', None)
files = kwargs.get('files', None)
accept = kwargs.get('accept', [])
headers = kwargs.get('headers', None)
resp = kwargs.get('resp', None)
httpcallback = kwargs.get('httpcallback', None)
body = kwargs.get('body', None)
thread.start_new_thread(_rest_invoke,(url, method, params, files, accept, headers, resp, httpcallback, body))
else:
return _rest_invoke(url,**kwargs)
def _rest_invoke(url,**kwargs):
method = kwargs.get('method', u'GET')
params = kwargs.get('params', {})
files = kwargs.get('files', {})
accept = kwargs.get('accept', [])
headers = kwargs.get('headers', {})
body = kwargs.get('body', None)
resp = kwargs.get('resp', False)
httpcallback = kwargs.get('httpcallback', None)
if httpcallback is not None:
method = httpcallback.method
url = httpcallback.url
body = httpcallback.body
if httpcallback.queryString != "":
if "?" not in url:
url += "?" + httpcallback.queryString
else:
url += "&" + httpcallback.queryString
ps = httpcallback.params
for (k,v) in ps:
params[k] = v
hs = httpcallback.headers
for (k,v) in hs:
headers[k] = v
if httpcallback.username or httpcallback.password:
print "warning: restclient can't handle HTTP auth yet"
if httpcallback.redirections != 5:
print "warning: restclient doesn't support HTTPCallback's restrictions yet"
if httpcallback.follow_all_redirects:
print "warning: restclient doesn't support HTTPCallback's follow_all_redirects_yet"
headers = add_accepts(accept,headers)
if files:
return post_multipart(extract_host(url),extract_path(url),
method,
unpack_params(fix_params(params)),
unpack_files(fix_files(files)),
fix_headers(headers),
resp,body)
else:
return non_multipart(fix_params(params), extract_host(url),
method, extract_path(url), fix_headers(headers),resp,body)
def non_multipart(params,host,method,path,headers,return_resp,body):
params = urllib.urlencode(params)
if method == "GET":
headers['Content-Length'] = '0'
if params:
# put the params into the url instead of the body
if "?" not in path:
path += "?" + params
else:
if path.endswith('?'):
path += params
else:
path += "&" + params
params = ""
else:
if body:
headers['Content-Length'] = str(len(params) + len(body))
else:
headers['Content-Length'] = str(len(params))
if not headers.has_key('Content-Type'):
headers['Content-Type'] = 'application/x-www-form-urlencoded'
h = httplib2.Http()
url = "http://%s%s" % (host,path)
if method != 'GET' and body:
resp,content = h.request(url,method.encode('utf-8'),body.encode('utf-8'),headers)
else:
resp, content = h.request(url,method.encode('utf-8'),params.encode('utf-8'),headers)
if return_resp:
return resp,content
else:
return content
def extract_host(url):
return my_urlparse(url)[0]
def extract_path(url):
return my_urlparse(url)[1]
def my_urlparse(url):
(scheme,host,path,ps,query,fragment) = urllib2.urlparse.urlparse(url)
if ps:
path += ";" + ps
if query:
path += "?" + query
return (host,path)
def unpack_params(params):
return [(k,params[k]) for k in params.keys()]
def unpack_files(files):
return [(k,files[k]['filename'],files[k]['file'],files[k].get('mimetype', None))
for k in files.keys()]
def add_accepts(accept=None,headers=None):
if accept is None: accept = []
if headers is None: headers = {}
if accept:
headers['Accept'] = ','.join(accept)
else:
headers['Accept'] = '*/*'
return headers
def fix_params(params=None):
if params is None: params = {}
for k in params.keys():
if type(k) not in types.StringTypes:
new_k = str(k)
params[new_k] = params[k]
del params[k]
else:
try:
k = k.encode('ascii')
except UnicodeEncodeError:
new_k = k.encode('utf8')
params[new_k] = params[k]
del params[k]
except UnicodeDecodeError:
pass
for k in params.keys():
if type(params[k]) not in types.StringTypes:
params[k] = str(params[k])
try:
v = params[k].encode('ascii')
except UnicodeEncodeError:
new_v = params[k].encode('utf8')
params[k] = new_v
except UnicodeDecodeError:
pass
return params
def fix_headers(headers=None):
if headers is None: headers = {}
for k in headers.keys():
if type(k) not in types.StringTypes:
new_k = str(k)
headers[new_k] = headers[k]
del headers[k]
if type(headers[k]) not in types.StringTypes:
headers[k] = str(headers[k])
try:
v = headers[k].encode('ascii')
k = k.encode('ascii')
except UnicodeEncodeError:
new_k = k.encode('ascii','ignore')
new_v = headers[k].encode('ascii','ignore')
headers[new_k] = new_v
del headers[k]
return headers
def fix_files(files=None):
if files is None: files = {}
# fix keys in files
for k in files.keys():
if type(k) not in types.StringTypes:
new_k = str(k)
files[new_k] = files[k]
del files[k]
try:
k = k.encode('ascii')
except UnicodeEncodeError:
new_k = k.encode('utf8')
files[new_k] = files[k]
del files[k]
# second pass to fix filenames
for k in files.keys():
try:
f = files[k]['filename'].encode('ascii')
except UnicodeEncodeError:
files[k]['filename'] = files[k]['filename'].encode('utf8')
return files
if __name__ == "__main__":
print rest_invoke("http://localhost:9090/",
method="POST",params={'value' : 'store this'},accept=["text/plain","text/html"],async=False)
image = open('sample.jpg').read()
r = rest_invoke("http://resizer.ccnmtl.columbia.edu/resize",method="POST",files={'image' : {'file' : image, 'filename' : 'sample.jpg'}},async=False)
out = open("thumb.jpg","w")
out.write(r)
out.close()
GET("http://resizer.ccnmtl.columbia.edu/")
r = POST("http://resizer.ccnmtl.columbia.edu/resize",files={'image' : {'file' : image, 'filename' : 'sample.jpg'}},async=False)
# evil unicode tests
print rest_invoke(u"http://localhost:9090/foo/",params={u'foo\u2012' : u'\u2012'},
headers={u"foo\u2012" : u"foo\u2012"})
r = rest_invoke(u"http://localhost:9090/resize",method="POST",files={u'image\u2012' : {'file' : image, 'filename' : u'samp\u2012le.jpg'}},async=False)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import time
from oslo.config import cfg
import six
from neutron.openstack.common.gettextutils import _, _LE, _LI
from neutron.openstack.common import log as logging
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help='Some periodic tasks can be run in a separate process. '
'Should we run them here?'),
]
CONF = cfg.CONF
CONF.register_opts(periodic_opts)
LOG = logging.getLogger(__name__)
DEFAULT_INTERVAL = 60.0
class InvalidPeriodicTaskArg(Exception):
message = _("Unexpected argument for periodic task creation: %(arg)s.")
def periodic_task(*args, **kwargs):
"""Decorator to indicate that a method is a periodic task.
This decorator can be used in two ways:
1. Without arguments '@periodic_task', this will be run on the default
interval of 60 seconds.
2. With arguments:
@periodic_task(spacing=N [, run_immediately=[True|False]])
this will be run on approximately every N seconds. If this number is
negative the periodic task will be disabled. If the run_immediately
argument is provided and has a value of 'True', the first run of the
task will be shortly after task scheduler starts. If
run_immediately is omitted or set to 'False', the first time the
task runs will be approximately N seconds after the task scheduler
starts.
"""
def decorator(f):
# Test for old style invocation
if 'ticks_between_runs' in kwargs:
raise InvalidPeriodicTaskArg(arg='ticks_between_runs')
# Control if run at all
f._periodic_task = True
f._periodic_external_ok = kwargs.pop('external_process_ok', False)
if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
f._periodic_enabled = False
else:
f._periodic_enabled = kwargs.pop('enabled', True)
# Control frequency
f._periodic_spacing = kwargs.pop('spacing', 0)
f._periodic_immediate = kwargs.pop('run_immediately', False)
if f._periodic_immediate:
f._periodic_last_run = None
else:
f._periodic_last_run = time.time()
return f
# NOTE(sirp): The `if` is necessary to allow the decorator to be used with
# and without parenthesis.
#
# In the 'with-parenthesis' case (with kwargs present), this function needs
# to return a decorator function since the interpreter will invoke it like:
#
# periodic_task(*args, **kwargs)(f)
#
# In the 'without-parenthesis' case, the original function will be passed
# in as the first argument, like:
#
# periodic_task(f)
if kwargs:
return decorator
else:
return decorator(args[0])
class _PeriodicTasksMeta(type):
def __init__(cls, names, bases, dict_):
"""Metaclass that allows us to collect decorated periodic tasks."""
super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
# NOTE(sirp): if the attribute is not present then we must be the base
# class, so, go ahead and initialize it. If the attribute is present,
# then we're a subclass so make a copy of it so we don't step on our
# parent's toes.
try:
cls._periodic_tasks = cls._periodic_tasks[:]
except AttributeError:
cls._periodic_tasks = []
try:
cls._periodic_spacing = cls._periodic_spacing.copy()
except AttributeError:
cls._periodic_spacing = {}
for value in cls.__dict__.values():
if getattr(value, '_periodic_task', False):
task = value
name = task.__name__
if task._periodic_spacing < 0:
LOG.info(_LI('Skipping periodic task %(task)s because '
'its interval is negative'),
{'task': name})
continue
if not task._periodic_enabled:
LOG.info(_LI('Skipping periodic task %(task)s because '
'it is disabled'),
{'task': name})
continue
# A periodic spacing of zero indicates that this task should
# be run on the default interval to avoid running too
# frequently.
if task._periodic_spacing == 0:
task._periodic_spacing = DEFAULT_INTERVAL
cls._periodic_tasks.append((name, task))
cls._periodic_spacing[name] = task._periodic_spacing
def _nearest_boundary(last_run, spacing):
"""Find nearest boundary which is in the past, which is a multiple of the
spacing with the last run as an offset.
Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24,
31, 38...
0% to 5% of the spacing value will be added to this value to ensure tasks
do not synchronize. This jitter is rounded down to a whole second, which
means that spacings smaller than 20 seconds will not have jitter.
"""
current_time = time.time()
if last_run is None:
return current_time
delta = current_time - last_run
offset = delta % spacing
# Add up to 5% jitter
jitter = int(spacing * (random.random() / 20))
return current_time - offset + jitter
@six.add_metaclass(_PeriodicTasksMeta)
class PeriodicTasks(object):
def __init__(self):
super(PeriodicTasks, self).__init__()
self._periodic_last_run = {}
for name, task in self._periodic_tasks:
self._periodic_last_run[name] = task._periodic_last_run
def run_periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
idle_for = DEFAULT_INTERVAL
for task_name, task in self._periodic_tasks:
full_task_name = '.'.join([self.__class__.__name__, task_name])
spacing = self._periodic_spacing[task_name]
last_run = self._periodic_last_run[task_name]
# Check if due, if not skip
idle_for = min(idle_for, spacing)
if last_run is not None:
delta = last_run + spacing - time.time()
if delta > 0:
idle_for = min(idle_for, delta)
continue
LOG.debug("Running periodic task %(full_task_name)s",
{"full_task_name": full_task_name})
self._periodic_last_run[task_name] = _nearest_boundary(
last_run, spacing)
try:
task(self, context)
except Exception as e:
if raise_on_error:
raise
LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
{"full_task_name": full_task_name, "e": e})
time.sleep(0)
return idle_for
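# Illustrative sketch of the two decorator forms described in periodic_task()'s
# docstring, used on a PeriodicTasks subclass; the class and method names are
# hypothetical. A service would then call run_periodic_tasks(context) in its
# main loop.
class _ExamplePeriodicTasks(PeriodicTasks):
    @periodic_task
    def _default_interval_task(self, context):
        # No spacing given, so this runs on DEFAULT_INTERVAL (60 seconds).
        LOG.debug("default-interval task ran")

    @periodic_task(spacing=10, run_immediately=True)
    def _ten_second_task(self, context):
        # First run shortly after the scheduler starts, then roughly every 10s.
        LOG.debug("ten-second task ran")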
|
|
#!/usr/bin/env python3
# coding: utf-8
"""Test architectures."""
import unittest
from triton import ARCH, TritonContext
from random import randrange
class TestX86ConcreteRegisterValue(unittest.TestCase):
"""Testing the X86 concrete value api."""
def setUp(self):
"""Define the arch."""
self.Triton = TritonContext()
self.Triton.setArchitecture(ARCH.X86)
self.ar = self.Triton.getAllRegisters()
self.pr = self.Triton.getParentRegisters()
def test_all_registers(self):
"""Check all registers"""
self.assertEqual(len(self.ar), 165)
def test_parent_registers(self):
"""Check parent registers"""
self.assertEqual(len(self.pr), 128)
def test_set_get_concrete_value(self):
"""Check setting concrete values"""
for r in self.pr:
if r.getBitSize() == 32:
self.Triton.setConcreteRegisterValue(r, 0xdeadbeaf)
elif r.getBitSize() == 64:
self.Triton.setConcreteRegisterValue(r, 0xabcdef0123456789)
elif r.getBitSize() == 128:
self.Triton.setConcreteRegisterValue(r, 0xabcdef01234567899876543210fedcba)
elif r.getBitSize() == 256:
self.Triton.setConcreteRegisterValue(r, 0xabcdef01234567899876543210fedcbaabcdef01234567899876543210fedcba)
else:
pass
"""Check getting concrete values"""
for r in self.pr:
if r.getBitSize() == 32:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0xdeadbeaf)
elif r.getBitSize() == 64:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0xabcdef0123456789)
elif r.getBitSize() == 128:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0xabcdef01234567899876543210fedcba)
elif r.getBitSize() == 256:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0xabcdef01234567899876543210fedcbaabcdef01234567899876543210fedcba)
else:
pass
"""Set everything to zero"""
for r in self.ar:
self.Triton.setConcreteRegisterValue(r, 0)
"""Check if everything is equal to zero"""
for r in self.ar:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0)
def test_fp_reg(self):
"""Check setting concrete values"""
for r in self.pr:
if r.getBitSize() == 80:
self.Triton.setConcreteRegisterValue(r, 0xabcdef0123456789dead)
"""Check getting concrete values"""
for r in self.pr:
if r.getBitSize() == 80:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0xabcdef0123456789dead)
def test_rand_set_get_concrete_value(self):
"""Check setting concrete values"""
for _ in range(100):
for reg in self.ar:
v = randrange(0, reg.getBitvector().getMaxValue() + 1)
self.Triton.setConcreteRegisterValue(reg, v)
self.assertEqual(self.Triton.getConcreteRegisterValue(reg), v)
class TestX8664ConcreteRegisterValue(unittest.TestCase):
"""Testing the X86_64 concrete value api."""
def setUp(self):
"""Define the arch."""
self.Triton = TritonContext()
self.Triton.setArchitecture(ARCH.X86_64)
self.ar = self.Triton.getAllRegisters()
self.pr = self.Triton.getParentRegisters()
def test_all_registers(self):
"""Check all registers"""
self.assertEqual(len(self.ar), 254)
def test_parent_registers(self):
"""Check parent registers"""
self.assertEqual(len(self.pr), 200)
def test_set_get_concrete_value(self):
"""Check setting concrete values"""
for r in self.pr:
if r.getBitSize() == 32:
self.Triton.setConcreteRegisterValue(r, 0xdeadbeaf)
elif r.getBitSize() == 64:
self.Triton.setConcreteRegisterValue(r, 0xabcdef0123456789)
elif r.getBitSize() == 128:
self.Triton.setConcreteRegisterValue(r, 0xabcdef01234567899876543210fedcba)
elif r.getBitSize() == 256:
self.Triton.setConcreteRegisterValue(r, 0xabcdef01234567899876543210fedcbaabcdef01234567899876543210fedcba)
elif r.getBitSize() == 512:
self.Triton.setConcreteRegisterValue(r, 0xabcdef01234567899876543210fedcbaabcdef01234567899876543210fedcbaabcdef01234567899876543210fedcbaabcdef01234567899876543210fedcba)
else:
pass
"""Check getting concrete values"""
for r in self.pr:
if r.getBitSize() == 32:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0xdeadbeaf)
elif r.getBitSize() == 64:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0xabcdef0123456789)
elif r.getBitSize() == 128:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0xabcdef01234567899876543210fedcba)
elif r.getBitSize() == 256:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0xabcdef01234567899876543210fedcbaabcdef01234567899876543210fedcba)
elif r.getBitSize() == 512:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0xabcdef01234567899876543210fedcbaabcdef01234567899876543210fedcbaabcdef01234567899876543210fedcbaabcdef01234567899876543210fedcba)
else:
pass
"""Set everything to zero"""
for r in self.ar:
self.Triton.setConcreteRegisterValue(r, 0)
"""Check if everything is equal to zero"""
for r in self.ar:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0)
def test_fp_reg(self):
"""Check setting concrete values"""
for r in self.pr:
if r.getBitSize() == 80:
self.Triton.setConcreteRegisterValue(r, 0xabcdef0123456789dead)
"""Check getting concrete values"""
for r in self.pr:
if r.getBitSize() == 80:
self.assertEqual(self.Triton.getConcreteRegisterValue(r), 0xabcdef0123456789dead)
class TestX86ConcreteMemoryValue(unittest.TestCase):
"""Testing the X86 concrete value api."""
def setUp(self):
"""Define the arch."""
self.Triton = TritonContext()
self.Triton.setArchitecture(ARCH.X86)
def test_set_get_concrete_value(self):
base = 0x1000
size = 256
count = 1
self.assertFalse(self.Triton.isConcreteMemoryValueDefined(base, size))
for x in range(size):
self.Triton.setConcreteMemoryValue(base + x, count & 0xff)
self.assertEqual(self.Triton.getConcreteMemoryValue(base + x), count & 0xff)
count += 1
self.assertTrue(self.Triton.isConcreteMemoryValueDefined(base, size))
self.Triton.clearConcreteMemoryValue(base, size)
self.assertFalse(self.Triton.isConcreteMemoryValueDefined(base, size))
self.Triton.setConcreteMemoryAreaValue(0x1000, b"\x11\x22\x33\x44\x55\x66")
self.Triton.setConcreteMemoryAreaValue(0x1006, [0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc])
self.assertEqual(self.Triton.getConcreteMemoryAreaValue(0x1000, 12), b"\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc")
class TestX8664ConcreteMemoryValue(unittest.TestCase):
"""Testing the X86 concrete value api."""
def setUp(self):
"""Define the arch."""
self.Triton = TritonContext()
self.Triton.setArchitecture(ARCH.X86_64)
def test_set_get_concrete_value(self):
base = 0x2000
size = 512
count = 1
self.assertFalse(self.Triton.isConcreteMemoryValueDefined(base, size))
for x in range(size):
self.Triton.setConcreteMemoryValue(base + x, count & 0xff)
self.assertEqual(self.Triton.getConcreteMemoryValue(base + x), count & 0xff)
count += 1
self.assertTrue(self.Triton.isConcreteMemoryValueDefined(base, size))
self.Triton.clearConcreteMemoryValue(base, size)
self.assertFalse(self.Triton.isConcreteMemoryValueDefined(base, size))
self.Triton.setConcreteMemoryAreaValue(0x1000, b"\x11\x22\x33\x44\x55\x66")
self.Triton.setConcreteMemoryAreaValue(0x1006, [0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc])
self.assertEqual(self.Triton.getConcreteMemoryAreaValue(0x1000, 12), b"\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc")
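# Not part of the original excerpt: a standard entry point so the test cases
# above can be run directly with `python <this_file>.py` instead of through an
# external runner such as `python -m unittest` or pytest.
if __name__ == '__main__':
    unittest.main()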
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Classes for dynamic generation of mock objects.
"""
import inspect
def serialize_obj(obj):
if isinstance(obj, float):
val = str(round(obj, 10))
elif isinstance(obj, dict):
d = {}
for k1, v1 in obj.items():
d[k1] = serialize_obj(v1)
val = str(d)
elif isinstance(obj, list):
l1 = []
for i1 in obj:
l1.append(serialize_obj(i1))
val = str(l1)
elif isinstance(obj, tuple):
l1 = ()
for i1 in obj:
l1 = l1 + (serialize_obj(i1),)
val = str(l1)
else:
val = str(obj)
return val
def serialize_args(*args, **kwargs):
"""Workaround for float string conversion issues in Python 2.6."""
return serialize_obj((args, kwargs))
class Mock(object):
def _get_next_value(self, name):
c = self._access_count.get(name)
if c is None:
c = 0
else:
c = c + 1
self._access_count[name] = c
return self._values[name][c]
def _get_next_ret_value(self, name, params):
d = self._access_count.get(name)
if d is None:
d = {}
self._access_count[name] = d
c = d.get(params)
if c is None:
c = 0
else:
c = c + 1
d[params] = c
return self._values[name][params][c]
def __init__(self, values):
self._values = values
self._access_count = {}
def has_values(self):
return len(self._values) > 0
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
return object.__getattribute__(self, name)
else:
if isinstance(self._values[name], dict):
def newfunc(*args, **kwargs):
params = serialize_args(args, kwargs)
return self._get_next_ret_value(name, params)
return newfunc
else:
return self._get_next_value(name)
def __str__(self):
return self._get_next_value('__str__')
def __iter__(self):
return getattr(self._get_next_value('__iter__'), '__iter__')()
def __len__(self):
return self._get_next_value('__len__')
def __getitem__(self, key):
return self._get_next_ret_value('__getitem__', str(key))
def __call__(self, *args, **kwargs):
params = serialize_args(args, kwargs)
return self._get_next_ret_value('__call__', params)
class MockProxy(object):
def __init__(self, wrapped):
self._wrapped = wrapped
self._recorded_values = {}
def _get_proxy_object(self, obj):
if (hasattr(obj, '__dict__') or
isinstance(obj, tuple) or
isinstance(obj, list) or
isinstance(obj, dict)):
p = MockProxy(obj)
else:
p = obj
return p
def __getattr__(self, name):
if name in ['_wrapped']:
return object.__getattribute__(self, name)
else:
attr = getattr(self._wrapped, name)
if (inspect.isfunction(attr) or
inspect.ismethod(attr) or
inspect.isbuiltin(attr)):
def newfunc(*args, **kwargs):
result = attr(*args, **kwargs)
p = self._get_proxy_object(result)
params = serialize_args(args, kwargs)
self._add_recorded_ret_value(name, params, p)
return p
return newfunc
elif (hasattr(attr, '__dict__') or
(hasattr(attr, '__getitem__') and not
(isinstance(attr, str) or isinstance(attr, unicode)))):
p = MockProxy(attr)
else:
p = attr
self._add_recorded_value(name, p)
return p
def __setattr__(self, name, value):
if name in ['_wrapped', '_recorded_values']:
object.__setattr__(self, name, value)
else:
setattr(self._wrapped, name, value)
def _add_recorded_ret_value(self, name, params, val):
d = self._recorded_values.get(name)
if d is None:
d = {}
self._recorded_values[name] = d
l = d.get(params)
if l is None:
l = []
d[params] = l
l.append(val)
def _add_recorded_value(self, name, val):
if name not in self._recorded_values:
self._recorded_values[name] = []
self._recorded_values[name].append(val)
def get_mock(self):
values = {}
for k, v in self._recorded_values.items():
if isinstance(v, dict):
d = {}
values[k] = d
for k1, v1 in v.items():
l = []
d[k1] = l
for i1 in v1:
if isinstance(i1, MockProxy):
l.append(i1.get_mock())
else:
l.append(i1)
else:
l = []
values[k] = l
for i in v:
if isinstance(i, MockProxy):
l.append(i.get_mock())
elif isinstance(i, dict):
d = {}
for k1, v1 in i.items():
if isinstance(v1, MockProxy):
d[k1] = v1.get_mock()
else:
d[k1] = v1
l.append(d)
elif isinstance(i, list):
l1 = []
for i1 in i:
if isinstance(i1, MockProxy):
l1.append(i1.get_mock())
else:
l1.append(i1)
l.append(l1)
else:
l.append(i)
return Mock(values)
def __str__(self):
s = str(self._wrapped)
self._add_recorded_value('__str__', s)
return s
def __len__(self):
l = len(self._wrapped)
self._add_recorded_value('__len__', l)
return l
def __iter__(self):
it = []
for i in self._wrapped:
it.append(self._get_proxy_object(i))
self._add_recorded_value('__iter__', it)
return iter(it)
def __getitem__(self, key):
p = self._get_proxy_object(self._wrapped[key])
self._add_recorded_ret_value('__getitem__', str(key), p)
return p
def __call__(self, *args, **kwargs):
c = self._wrapped(*args, **kwargs)
p = self._get_proxy_object(c)
params = serialize_args(args, kwargs)
self._add_recorded_ret_value('__call__', params, p)
return p
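# Illustrative sketch (not part of the original module): record attribute and
# method access through a MockProxy, then replay it from the generated Mock.
if __name__ == '__main__':
    class _Sample(object):
        name = 'sample'

        def add(self, a, b):
            return a + b

    proxy = MockProxy(_Sample())
    assert proxy.add(1, 2) == 3        # recorded method call
    assert proxy.name == 'sample'      # recorded attribute access
    replay = proxy.get_mock()          # Mock built from the recorded values
    assert replay.add(1, 2) == 3       # replayed return value for same args
    assert replay.name == 'sample'     # replayed attribute value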
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=True
"""Worker operations executor.
For internal use only; no backwards-compatibility guarantees.
"""
import sys
import traceback
from apache_beam.internal import util
from apache_beam.metrics.execution import ScopedMetricsContainer
from apache_beam.pvalue import TaggedOutput
from apache_beam.transforms import core
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import TimestampedValue
from apache_beam.transforms.window import WindowFn
from apache_beam.utils.windowed_value import WindowedValue
class LoggingContext(object):
"""For internal use only; no backwards-compatibility guarantees."""
def enter(self):
pass
def exit(self):
pass
class Receiver(object):
"""For internal use only; no backwards-compatibility guarantees.
An object that consumes a WindowedValue.
This class can be efficiently used to pass values between the
sdk and worker harnesses.
"""
def receive(self, windowed_value):
raise NotImplementedError
class DoFnMethodWrapper(object):
"""For internal use only; no backwards-compatibility guarantees.
Represents a method of a DoFn object."""
def __init__(self, do_fn, method_name):
"""
Initializes a ``DoFnMethodWrapper``.
Args:
do_fn: A DoFn object that contains the method.
method_name: name of the method as a string.
"""
args, _, _, defaults = do_fn.get_function_arguments(method_name)
defaults = defaults if defaults else []
method_value = getattr(do_fn, method_name)
self.method_value = method_value
self.args = args
self.defaults = defaults
class DoFnSignature(object):
"""Represents the signature of a given ``DoFn`` object.
Signature of a ``DoFn`` provides a view of the properties of a given ``DoFn``.
Among other things, this will give an extensible way for (1) accessing the
structure of the ``DoFn`` including methods and method parameters,
(2) identifying features that a given ``DoFn`` supports, for example, whether
a given ``DoFn`` is a Splittable ``DoFn`` (
https://s.apache.org/splittable-do-fn) (3) validating a ``DoFn`` based on the
feature set offered by it.
"""
def __init__(self, do_fn):
# We add a property here for all methods defined by Beam DoFn features.
assert isinstance(do_fn, core.DoFn)
self.do_fn = do_fn
self.process_method = DoFnMethodWrapper(do_fn, 'process')
self.start_bundle_method = DoFnMethodWrapper(do_fn, 'start_bundle')
self.finish_bundle_method = DoFnMethodWrapper(do_fn, 'finish_bundle')
self._validate()
def _validate(self):
self._validate_process()
self._validate_bundle_method(self.start_bundle_method)
self._validate_bundle_method(self.finish_bundle_method)
def _validate_process(self):
"""Validate that none of the DoFnParameters are repeated in the function
"""
for param in core.DoFn.DoFnParams:
assert self.process_method.defaults.count(param) <= 1
def _validate_bundle_method(self, method_wrapper):
"""Validate that none of the DoFnParameters are used in the function
"""
for param in core.DoFn.DoFnParams:
assert param not in method_wrapper.defaults
class DoFnInvoker(object):
"""An abstraction that can be used to execute DoFn methods.
A DoFnInvoker describes a particular way for invoking methods of a DoFn
represented by a given DoFnSignature."""
def __init__(self, output_processor, signature):
self.output_processor = output_processor
self.signature = signature
@staticmethod
def create_invoker(
output_processor,
signature, context, side_inputs, input_args, input_kwargs):
""" Creates a new DoFnInvoker based on given arguments.
Args:
output_processor: an _OutputProcessor through which the outputs of the
invoked DoFn methods are dispatched.
signature: a DoFnSignature for the DoFn being invoked.
context: Context to be used when invoking the DoFn (deprecated).
side_inputs: side inputs to be used when invoking the process method.
input_args: arguments to be used when invoking the process method.
input_kwargs: kwargs to be used when invoking the process method.
"""
default_arg_values = signature.process_method.defaults
use_simple_invoker = (
not side_inputs and not input_args and not input_kwargs and
not default_arg_values)
if use_simple_invoker:
return SimpleInvoker(output_processor, signature)
else:
return PerWindowInvoker(
output_processor,
signature, context, side_inputs, input_args, input_kwargs)
def invoke_process(self, windowed_value):
"""Invokes the DoFn.process() function.
Args:
windowed_value: a WindowedValue object that gives the element for which
the process() method should be invoked, along with the window
the element belongs to.
"""
raise NotImplementedError
def invoke_start_bundle(self):
"""Invokes the DoFn.start_bundle() method.
"""
self.output_processor.start_bundle_outputs(
self.signature.start_bundle_method.method_value())
def invoke_finish_bundle(self):
"""Invokes the DoFn.finish_bundle() method.
"""
self.output_processor.finish_bundle_outputs(
self.signature.finish_bundle_method.method_value())
class SimpleInvoker(DoFnInvoker):
"""An invoker that processes elements ignoring windowing information."""
def __init__(self, output_processor, signature):
super(SimpleInvoker, self).__init__(output_processor, signature)
self.process_method = signature.process_method.method_value
def invoke_process(self, windowed_value):
self.output_processor.process_outputs(
windowed_value, self.process_method(windowed_value.value))
class PerWindowInvoker(DoFnInvoker):
"""An invoker that processes elements considering windowing information."""
def __init__(self, output_processor, signature, context,
side_inputs, input_args, input_kwargs):
super(PerWindowInvoker, self).__init__(output_processor, signature)
self.side_inputs = side_inputs
self.context = context
self.process_method = signature.process_method.method_value
default_arg_values = signature.process_method.defaults
self.has_windowed_inputs = (
not all(si.is_globally_windowed() for si in side_inputs) or
(core.DoFn.WindowParam in default_arg_values))
# Try to prepare all the arguments that can just be filled in
# without any additional work in the process function.
# Also cache all the placeholders needed in the process function.
# Fill in side inputs if they are globally windowed.
global_window = GlobalWindow()
input_args = input_args if input_args else []
input_kwargs = input_kwargs if input_kwargs else {}
if not self.has_windowed_inputs:
input_args, input_kwargs = util.insert_values_in_args(
input_args, input_kwargs, [si[global_window] for si in side_inputs])
arguments = signature.process_method.args
defaults = signature.process_method.defaults
# Create placeholder for element parameter of DoFn.process() method.
self_in_args = int(signature.do_fn.is_process_bounded())
class ArgPlaceholder(object):
def __init__(self, placeholder):
self.placeholder = placeholder
if core.DoFn.ElementParam not in default_arg_values:
args_to_pick = len(arguments) - len(default_arg_values) - 1 - self_in_args
args_with_placeholders = (
[ArgPlaceholder(core.DoFn.ElementParam)] + input_args[:args_to_pick])
else:
args_to_pick = len(arguments) - len(defaults) - self_in_args
args_with_placeholders = input_args[:args_to_pick]
# Fill the OtherPlaceholders for context, window or timestamp
remaining_args_iter = iter(input_args[args_to_pick:])
for a, d in zip(arguments[-len(defaults):], defaults):
if d == core.DoFn.ElementParam:
args_with_placeholders.append(ArgPlaceholder(d))
elif d == core.DoFn.WindowParam:
args_with_placeholders.append(ArgPlaceholder(d))
elif d == core.DoFn.TimestampParam:
args_with_placeholders.append(ArgPlaceholder(d))
elif d == core.DoFn.SideInputParam:
# If no more args are present then the value must be passed via kwarg
try:
args_with_placeholders.append(next(remaining_args_iter))
except StopIteration:
if a not in input_kwargs:
raise ValueError("Value for sideinput %s not provided" % a)
else:
# If no more args are present then the value must be passed via kwarg
try:
args_with_placeholders.append(next(remaining_args_iter))
except StopIteration:
pass
args_with_placeholders.extend(list(remaining_args_iter))
# Stash the list of placeholder positions for performance
self.placeholders = [(i, x.placeholder) for (i, x) in enumerate(
args_with_placeholders)
if isinstance(x, ArgPlaceholder)]
self.args_for_process = args_with_placeholders
self.kwargs_for_process = input_kwargs
def invoke_process(self, windowed_value):
self.context.set_element(windowed_value)
# Call the process function for each window if it has windowed side inputs
# or if the process accesses the window parameter. We can just call it once
# otherwise, as none of the arguments change.
if self.has_windowed_inputs and len(windowed_value.windows) != 1:
for w in windowed_value.windows:
self._invoke_per_window(
WindowedValue(windowed_value.value, windowed_value.timestamp, (w,)))
else:
self._invoke_per_window(windowed_value)
def _invoke_per_window(self, windowed_value):
if self.has_windowed_inputs:
window, = windowed_value.windows
args_for_process, kwargs_for_process = util.insert_values_in_args(
self.args_for_process, self.kwargs_for_process,
[si[window] for si in self.side_inputs])
else:
args_for_process, kwargs_for_process = (
self.args_for_process, self.kwargs_for_process)
# TODO(sourabhbajaj): Investigate why we can't use `is` instead of ==
for i, p in self.placeholders:
if p == core.DoFn.ElementParam:
args_for_process[i] = windowed_value.value
elif p == core.DoFn.WindowParam:
args_for_process[i] = window
elif p == core.DoFn.TimestampParam:
args_for_process[i] = windowed_value.timestamp
if kwargs_for_process:
self.output_processor.process_outputs(
windowed_value,
self.process_method(*args_for_process, **kwargs_for_process))
else:
self.output_processor.process_outputs(
windowed_value, self.process_method(*args_for_process))
class DoFnRunner(Receiver):
"""For internal use only; no backwards-compatibility guarantees.
A helper class for executing ParDo operations.
"""
def __init__(self,
fn,
args,
kwargs,
side_inputs,
windowing,
context=None,
tagged_receivers=None,
logger=None,
step_name=None,
# Preferred alternative to logger
# TODO(robertwb): Remove once all runners are updated.
logging_context=None,
# Preferred alternative to context
# TODO(robertwb): Remove once all runners are updated.
state=None,
scoped_metrics_container=None):
"""Initializes a DoFnRunner.
Args:
fn: user DoFn to invoke
args: positional side input arguments (static and placeholder), if any
kwargs: keyword side input arguments (static and placeholder), if any
side_inputs: list of sideinput.SideInputMaps for deferred side inputs
windowing: windowing properties of the output PCollection(s)
context: a DoFnContext to use (deprecated)
tagged_receivers: a dict of tag name to Receiver objects
logger: a logging module (deprecated)
step_name: the name of this step
logging_context: a LoggingContext object
state: handle for accessing DoFn state
scoped_metrics_container: Context switcher for metrics container
"""
self.scoped_metrics_container = (scoped_metrics_container
or ScopedMetricsContainer())
self.step_name = step_name
# Need to support multiple iterations.
side_inputs = list(side_inputs)
if logging_context:
self.logging_context = logging_context
else:
self.logging_context = get_logging_context(logger, step_name=step_name)
# TODO(sourabh): Deprecate the use of context
if state:
assert context is None
context = DoFnContext(step_name, state=state)
else:
assert context is not None
context = context
self.context = context
do_fn_signature = DoFnSignature(fn)
# Optimize for the common case.
main_receivers = as_receiver(tagged_receivers[None])
output_processor = _OutputProcessor(
windowing.windowfn, main_receivers, tagged_receivers)
self.do_fn_invoker = DoFnInvoker.create_invoker(
output_processor, do_fn_signature, context, side_inputs, args, kwargs)
def receive(self, windowed_value):
self.process(windowed_value)
def process(self, windowed_value):
try:
self.logging_context.enter()
self.scoped_metrics_container.enter()
self.do_fn_invoker.invoke_process(windowed_value)
except BaseException as exn:
self._reraise_augmented(exn)
finally:
self.scoped_metrics_container.exit()
self.logging_context.exit()
def _invoke_bundle_method(self, bundle_method):
try:
self.logging_context.enter()
self.scoped_metrics_container.enter()
self.context.set_element(None)
bundle_method()
except BaseException as exn:
self._reraise_augmented(exn)
finally:
self.scoped_metrics_container.exit()
self.logging_context.exit()
def start(self):
self._invoke_bundle_method(self.do_fn_invoker.invoke_start_bundle)
def finish(self):
self._invoke_bundle_method(self.do_fn_invoker.invoke_finish_bundle)
def _reraise_augmented(self, exn):
if getattr(exn, '_tagged_with_step', False) or not self.step_name:
raise
step_annotation = " [while running '%s']" % self.step_name
# To emulate exception chaining (not available in Python 2).
original_traceback = sys.exc_info()[2]
try:
# Attempt to construct the same kind of exception
# with an augmented message.
new_exn = type(exn)(exn.args[0] + step_annotation, *exn.args[1:])
new_exn._tagged_with_step = True # Could raise attribute error.
except: # pylint: disable=bare-except
# If anything goes wrong, construct a RuntimeError whose message
# records the original exception's type and message.
new_exn = RuntimeError(
traceback.format_exception_only(type(exn), exn)[-1].strip()
+ step_annotation)
new_exn._tagged_with_step = True
raise new_exn, None, original_traceback
class _OutputProcessor(object):
"""Processes output produced by DoFn method invocations."""
def __init__(self, window_fn, main_receivers, tagged_receivers):
"""Initializes ``_OutputProcessor``.
Args:
window_fn: a windowing function (WindowFn).
main_receivers: a Receiver object for the main output.
tagged_receivers: a dict of tag name to Receiver objects.
"""
self.window_fn = window_fn
self.main_receivers = main_receivers
self.tagged_receivers = tagged_receivers
def process_outputs(self, windowed_input_element, results):
"""Dispatch the result of process computation to the appropriate receivers.
A value wrapped in a TaggedOutput object will be unwrapped and
then dispatched to the appropriate indexed output.
"""
if results is None:
return
for result in results:
tag = None
if isinstance(result, TaggedOutput):
tag = result.tag
if not isinstance(tag, basestring):
raise TypeError('In %s, tag %s is not a string' % (self, tag))
result = result.value
if isinstance(result, WindowedValue):
windowed_value = result
if (windowed_input_element is not None
and len(windowed_input_element.windows) != 1):
windowed_value.windows *= len(windowed_input_element.windows)
elif isinstance(result, TimestampedValue):
assign_context = WindowFn.AssignContext(result.timestamp, result.value)
windowed_value = WindowedValue(
result.value, result.timestamp,
self.window_fn.assign(assign_context))
if len(windowed_input_element.windows) != 1:
windowed_value.windows *= len(windowed_input_element.windows)
else:
windowed_value = windowed_input_element.with_value(result)
if tag is None:
self.main_receivers.receive(windowed_value)
else:
self.tagged_receivers[tag].output(windowed_value)
def start_bundle_outputs(self, results):
"""Validate that start_bundle does not output any elements"""
if results is None:
return
raise RuntimeError(
'Start Bundle should not output any elements but got %s' % results)
def finish_bundle_outputs(self, results):
"""Dispatch the result of finish_bundle to the appropriate receivers.
A value wrapped in a TaggedOutput object will be unwrapped and
then dispatched to the appropriate indexed output.
"""
if results is None:
return
for result in results:
tag = None
if isinstance(result, TaggedOutput):
tag = result.tag
if not isinstance(tag, basestring):
raise TypeError('In %s, tag %s is not a string' % (self, tag))
result = result.value
if isinstance(result, WindowedValue):
windowed_value = result
else:
raise RuntimeError('Finish Bundle should only output WindowedValue ' +\
'type but got %s' % type(result))
if tag is None:
self.main_receivers.receive(windowed_value)
else:
self.tagged_receivers[tag].output(windowed_value)
class _NoContext(WindowFn.AssignContext):
"""An uninspectable WindowFn.AssignContext."""
NO_VALUE = object()
def __init__(self, value, timestamp=NO_VALUE):
self.value = value
self._timestamp = timestamp
@property
def timestamp(self):
if self._timestamp is self.NO_VALUE:
raise ValueError('No timestamp in this context.')
else:
return self._timestamp
@property
def existing_windows(self):
raise ValueError('No existing_windows in this context.')
class DoFnState(object):
"""For internal use only; no backwards-compatibility guarantees.
Keeps track of state that DoFns want, currently, user counters.
"""
def __init__(self, counter_factory):
self.step_name = ''
self._counter_factory = counter_factory
def counter_for(self, aggregator):
"""Looks up the counter for this aggregator, creating one if necessary."""
return self._counter_factory.get_aggregator_counter(
self.step_name, aggregator)
# TODO(robertwb): Replace core.DoFnContext with this.
class DoFnContext(object):
"""For internal use only; no backwards-compatibility guarantees."""
def __init__(self, label, element=None, state=None):
self.label = label
self.state = state
if element is not None:
self.set_element(element)
def set_element(self, windowed_value):
self.windowed_value = windowed_value
@property
def element(self):
if self.windowed_value is None:
raise AttributeError('element not accessible in this context')
else:
return self.windowed_value.value
@property
def timestamp(self):
if self.windowed_value is None:
raise AttributeError('timestamp not accessible in this context')
else:
return self.windowed_value.timestamp
@property
def windows(self):
if self.windowed_value is None:
raise AttributeError('windows not accessible in this context')
else:
return self.windowed_value.windows
# TODO(robertwb): Remove all these adapters once service is updated out.
class _LoggingContextAdapter(LoggingContext):
def __init__(self, underlying):
self.underlying = underlying
def enter(self):
self.underlying.enter()
def exit(self):
self.underlying.exit()
def get_logging_context(maybe_logger, **kwargs):
if maybe_logger:
maybe_context = maybe_logger.PerThreadLoggingContext(**kwargs)
if isinstance(maybe_context, LoggingContext):
return maybe_context
return _LoggingContextAdapter(maybe_context)
return LoggingContext()
class _ReceiverAdapter(Receiver):
def __init__(self, underlying):
self.underlying = underlying
def receive(self, windowed_value):
self.underlying.output(windowed_value)
def as_receiver(maybe_receiver):
"""For internal use only; no backwards-compatibility guarantees."""
if isinstance(maybe_receiver, Receiver):
return maybe_receiver
return _ReceiverAdapter(maybe_receiver)
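# Illustrative sketch (not part of the original module): as_receiver() adapts
# any object exposing the legacy output() interface so that callers can
# uniformly use receive().
if __name__ == '__main__':
    class _PrintingSink(object):
        """Toy sink with the legacy output() interface."""

        def output(self, windowed_value):
            print(windowed_value)

    as_receiver(_PrintingSink()).receive('element')  # prints: element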
|
|
# -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from . import nodes
from .exceptions import TemplateSyntaxError, TemplateAssertionError
from .lexer import describe_token, describe_token_expr
from ._compat import imap
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
'macro', 'include', 'from', 'import',
'set', 'with', 'autoescape'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
_math_nodes = {
'add': nodes.Add,
'sub': nodes.Sub,
'mul': nodes.Mul,
'div': nodes.Div,
'floordiv': nodes.FloorDiv,
'mod': nodes.Mod,
}
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None,
state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack = []
self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
currently_looking = ' or '.join(
"'%s'" % describe_token_expr(expr)
for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
message = ['Unexpected end of template.']
else:
message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
message.append('You probably made a nesting mistake. Jinja '
'is expecting this tag, but currently looking '
'for %s.' % currently_looking)
else:
message.append('Jinja was looking for the following tags: '
'%s.' % currently_looking)
if self._tag_stack:
message.append('The innermost block that needs to be '
'closed is \'%s\'.' % self._tag_stack[-1])
self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != 'name':
self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, 'parse_' + self.stream.current.value)()
if token.value == 'call':
return self.parse_call_block()
if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
for the block end and parses until one of the `end_tokens` is
reached. By default the active token in the stream at the end of
the call is the matched end token. If this is not wanted, `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target(with_namespace=True)
if self.stream.skip_if('assign'):
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
filter_node = self.parse_filter(None)
body = self.parse_statements(('name:endset',),
drop_needle=True)
return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
node.elif_ = []
node.else_ = []
token = next(self.stream)
if token.test('name:elif'):
node = nodes.If(lineno=self.stream.current.lineno)
result.elif_.append(node)
continue
elif token.test('name:else'):
result.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
break
return result
def parse_with(self):
node = nodes.With(lineno=next(self.stream).lineno)
targets = []
values = []
while self.stream.current.type != 'block_end':
lineno = self.stream.current.lineno
if targets:
self.stream.expect('comma')
target = self.parse_assign_target()
target.set_ctx('param')
targets.append(target)
self.stream.expect('assign')
values.append(self.parse_expression())
node.targets = targets
node.values = values
node.body = self.parse_statements(('name:endwith',),
drop_needle=True)
return node
def parse_autoescape(self):
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
node.options = [
nodes.Keyword('autoescape', self.parse_expression())
]
node.body = self.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect('name').value
node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test('name:ignore') and \
self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:import')
node.names = []
def parse_context():
if self.stream.current.value in ('with', 'without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect('comma')
if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith('_'):
self.fail('names starting with an underscore can not '
'be imported', target.lineno,
exc=TemplateAssertionError)
if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != 'comma':
break
else:
self.stream.expect('name')
if not hasattr(node, 'with_context'):
node.with_context = False
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect('lparen')
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
arg.set_ctx('param')
if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
elif defaults:
self.fail('non-default argument follows default argument')
args.append(arg)
self.stream.expect('rparen')
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail('expected call', node.lineno)
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None, with_namespace=False):
"""Parse an assignment target. As Jinja2 allows assignments to
tuples, this function can parse all allowed assignment targets. By
default assignments to tuples are parsed; that can be disabled
by setting `with_tuple` to `False`. If only assignments to names are
wanted, `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
"""
if with_namespace and self.stream.look().type == 'dot':
token = self.stream.expect('name')
next(self.stream) # dot
attr = self.stream.expect('name')
target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
elif name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_math1()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_math1()))
elif self.stream.skip_if('name:in'):
ops.append(nodes.Operand('in', self.parse_math1()))
elif (self.stream.current.test('name:not') and
self.stream.look().test('name:in')):
self.stream.skip(2)
ops.append(nodes.Operand('notin', self.parse_math1()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_math1(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type in ('add', 'sub'):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_concat()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_math2()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_math2())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_math2(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_pow()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == 'sub':
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
elif token_type == 'add':
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
node = self.parse_primary()
node = self.parse_postfix(node)
if with_filter:
node = self.parse_filter_expr(node)
return node
def parse_primary(self):
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
node = nodes.Const(token.value in ('true', 'True'),
lineno=token.lineno)
elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const(''.join(buf), lineno=lineno)
elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect('rparen')
elif token.type == 'lbracket':
node = self.parse_list()
elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a single expression node instead of a tuple
if no commas were found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `with_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
self.fail('expected subscript expression', token.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
next(self.stream)
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
def parse_call(self, node):
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail('invalid syntax for function call expression',
token.lineno)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect('name')
name = token.value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect('name').value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif (self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace') and not
self.stream.current.test_any('name:else', 'name:or',
'name:and')):
if self.stream.current.test('name:is'):
self.fail('You cannot chain multiple tests with is')
args = [self.parse_primary()]
else:
args = []
node = nodes.Test(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
def subparse(self, end_tokens=None):
body = []
data_buffer = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data():
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == 'data':
if token.value:
add_data(nodes.TemplateData(token.value,
lineno=token.lineno))
next(self.stream)
elif token.type == 'variable_begin':
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect('variable_end')
elif token.type == 'block_begin':
flush_data()
next(self.stream)
if end_tokens is not None and \
self.stream.current.test_any(*end_tokens):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect('block_end')
else:
raise AssertionError('internal parsing error')
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
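# Illustrative usage sketch (not part of the original module; assumes a full
# Jinja2 installation): templates are normally parsed through Environment,
# which constructs this Parser internally and returns the Template node
# produced by Parser.parse().
#
#   from jinja2 import Environment, meta
#   env = Environment()
#   ast = env.parse('Hello {{ name | upper }}!')
#   print(meta.find_undeclared_variables(ast))   # {'name'}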
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various function for graph editing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.graph_editor import reroute
from tensorflow.contrib.graph_editor import select
from tensorflow.contrib.graph_editor import subgraph
from tensorflow.contrib.graph_editor import util
from tensorflow.python.ops import array_ops as tf_array_ops
__all__ = [
"detach_control_inputs",
"detach_control_outputs",
"detach_inputs",
"detach_outputs",
"detach",
"connect",
"bypass",
]
def detach_control_inputs(sgv):
"""Detach all the external control inputs of the subgraph sgv.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
"""
sgv = subgraph.make_view(sgv)
for op in sgv.ops:
cops = [cop for cop in op.control_inputs if cop not in sgv.ops]
reroute.remove_control_inputs(op, cops)
def detach_control_outputs(sgv, control_outputs):
"""Detach all the external control outputs of the subgraph sgv.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
control_outputs: a util.ControlOutputs instance.
"""
if not isinstance(control_outputs, util.ControlOutputs):
raise TypeError("Expected a util.ControlOutputs, got: {}",
type(control_outputs))
control_outputs.update()
sgv = subgraph.make_view(sgv)
for op in sgv.ops:
for cop in control_outputs.get(op):
if cop not in sgv.ops:
reroute.remove_control_inputs(cop, op)
def detach_inputs(sgv, control_inputs=False):
"""Detach the inputs of a subgraph view.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
Note that sgv is modified in place.
control_inputs: if True control_inputs are also detached.
Returns:
A tuple `(sgv, input_placeholders)` where
`sgv` is a new subgraph view of the detached subgraph;
`input_placeholders` is a list of the created input placeholders.
Raises:
StandardError: if sgv cannot be converted to a SubGraphView using
the same rules as the function subgraph.make_view.
"""
sgv = subgraph.make_view(sgv)
with sgv.graph.as_default():
input_placeholders = [
tf_array_ops.placeholder(
dtype=input_t.dtype, name=util.placeholder_name(input_t))
for input_t in sgv.inputs
]
reroute.swap_inputs(sgv, input_placeholders)
if control_inputs:
detach_control_inputs(sgv)
return sgv, input_placeholders
def detach_outputs(sgv, control_outputs=None):
"""Detach the output of a subgraph view.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
Note that sgv is modified in place.
control_outputs: a util.ControlOutputs instance or None. If not None the
control outputs are also detached.
Returns:
A tuple `(sgv, output_placeholders)` where
`sgv` is a new subgraph view of the detached subgraph;
`output_placeholders` is a list of the created output placeholders.
Raises:
StandardError: if sgv cannot be converted to a SubGraphView using
the same rules as the function subgraph.make_view.
"""
sgv = subgraph.make_view(sgv)
# only select outputs with consumers
sgv_ = sgv.remap_outputs([output_id
for output_id, output_t in enumerate(sgv.outputs)
if output_t.consumers()])
# create consumer subgraph and remap
consumers_sgv = subgraph.SubGraphView(sgv_.consumers())
consumers_sgv = consumers_sgv.remap_inputs(
[input_id for input_id, input_t in enumerate(consumers_sgv.inputs)
if input_t in sgv_.outputs])
with sgv_.graph.as_default():
output_placeholders = [
util.make_placeholder_from_tensor(input_t)
for input_t in consumers_sgv.inputs
]
reroute.swap_outputs(sgv_, output_placeholders)
if control_outputs is not None:
detach_control_outputs(sgv_, control_outputs)
return sgv_, output_placeholders
def detach(sgv, control_inputs=False, control_outputs=None, control_ios=None):
"""Detach both the inputs and the outputs of a subgraph view.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
Note that sgv is modified in place.
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of util.ControlOutputs or None. If not None,
control outputs are enabled.
control_ios: An instance of util.ControlOutputs or None. If not None, both
control inputs and control outputs are enabled. This is equivalent to set
control_inputs to True and control_outputs to the util.ControlOutputs
instance.
Returns:
A tuple `(sgv, detached_inputs, detached_outputs)` where:
`sgv` is a new subgraph view of the detached subgraph;
`detached_inputs` is a list of the created input placeholders;
`detached_outputs` is a list of the created output placeholders.
Raises:
StandardError: if sgv cannot be converted to a SubGraphView using
the same rules as the function subgraph.make_view.
"""
control_inputs, control_outputs = select.check_cios(control_inputs,
control_outputs,
control_ios)
_, detached_inputs = detach_inputs(sgv, control_inputs)
_, detached_outputs = detach_outputs(sgv, control_outputs)
return sgv, detached_inputs, detached_outputs
def connect(sgv0, sgv1, disconnect_first=False):
"""Connect the outputs of sgv0 to the inputs of sgv1.
Args:
sgv0: the first subgraph to have its outputs swapped. This argument is
converted to a subgraph using the same rules as the function
subgraph.make_view.
Note that sgv0 is modified in place.
sgv1: the second subgraph to have its outputs swapped. This argument is
converted to a subgraph using the same rules as the function
subgraph.make_view.
Note that sgv1 is modified in place.
disconnect_first: if True the current outputs of sgv0 are disconnected.
Returns:
A tuple `(sgv0, sgv1)` of the now connected subgraphs.
Raises:
StandardError: if sgv0 or sgv1 cannot be converted to a SubGraphView using
the same rules as the function subgraph.make_view.
"""
sgv0 = subgraph.make_view(sgv0)
sgv1 = subgraph.make_view(sgv1)
util.check_graphs(sgv0, sgv1)
if disconnect_first:
detach_outputs(sgv0)
sgv0_outputs = subgraph.SubGraphView(passthrough_ts=sgv0.outputs)
reroute.reroute_inputs(sgv0_outputs, sgv1)
return sgv0, sgv1
def bypass(sgv):
"""Bypass the given subgraph by connecting its inputs to its outputs.
Args:
sgv: the subgraph view to be bypassed. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
Note that sgv is modified in place.
Returns:
A tuple `(sgv, detached_inputs)` where:
`sgv` is a new subgraph view of the bypassed subgraph;
`detached_inputs` is a list of the created input placeholders.
Raises:
StandardError: if sgv cannot be converted to a SubGraphView using
the same rules as the function subgraph.make_view.
"""
# TODO(fkp): allows to plug sgv.inputs to individual sgv.outputs consumers
sgv = subgraph.make_view(sgv)
sgv_inputs = list(sgv.inputs)
sgv, detached_inputs = detach_inputs(sgv)
reroute.reroute_ts(sgv_inputs, sgv.outputs)
return sgv, detached_inputs
|
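# Illustrative usage sketch for the graph_editor helpers above (assumes a
# TensorFlow 1.x installation, where tf.contrib.graph_editor is available).
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge

graph = tf.Graph()
with graph.as_default():
    a = tf.constant(1.0, name="a")
    b = tf.constant(2.0, name="b")
    c = tf.add(a, b, name="c")
    d = tf.multiply(c, 2.0, name="d")

# Detach the inputs of the op computing `d`: they are replaced by placeholders,
# which are returned so the subgraph can be re-wired later.
sgv, input_placeholders = ge.detach_inputs(ge.sgv(d.op))
print([p.name for p in input_placeholders])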
|
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from urllib3.poolmanager import PoolManager, proxy_from_url
from urllib3.response import HTTPResponse
from urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
except_on_missing_scheme, get_auth_from_url)
from .structures import CaseInsensitiveDict
from urllib3.exceptions import MaxRetryError
from urllib3.exceptions import TimeoutError
from urllib3.exceptions import SSLError as _SSLError
from urllib3.exceptions import HTTPError as _HTTPError
from urllib3.exceptions import ProxyError as _ProxyError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed connections and
timeouts, never to requests where the server returns a response.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
"""Initializes a urllib3 PoolManager. This method should not be called
from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block)
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
except_on_missing_scheme(proxy)
proxy_headers = self.proxy_headers(proxy)
if proxy not in self.proxy_manager:
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block)
conn = self.proxy_manager[proxy].connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
scheme = urlparse(request.url).scheme
proxy = proxies.get(scheme)
if proxy and scheme != 'https':
url, _ = urldefrag(request.url)
else:
url = request.path_url
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The url of the proxy being used for this request.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) The timeout on the request.
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if stream:
timeout = TimeoutSauce(connect=timeout)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
else:
# All is well, return the connection to the pool.
conn._put_conn(low_conn)
except socket.error as sockerr:
raise ConnectionError(sockerr)
except MaxRetryError as e:
raise ConnectionError(e)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e)
elif isinstance(e, TimeoutError):
raise Timeout(e)
else:
raise
r = self.build_response(request, resp)
if not stream:
r.content
return r
|
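# Illustrative usage sketch: configure the adapter above with a larger
# connection pool and a retry budget, then mount it on a Session so it serves
# all HTTPS requests (uses only the public API shown above).
import requests
from requests.adapters import HTTPAdapter

session = requests.Session()
adapter = HTTPAdapter(pool_connections=20, pool_maxsize=50, max_retries=3)
session.mount('https://', adapter)
# session.get('https://example.com')  # served through the custom adapter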
|
"""
pytest-trello
~~~~~~~~~~~~
pytest-trello is a plugin for py.test that allows tests to reference trello
cards for skip/xfail handling.
:copyright: see LICENSE for details
:license: MIT, see LICENSE for more details.
"""
import os
import logging
import yaml
import pytest
import py
import trello
import requests.exceptions
from _pytest.python import getlocation
from _pytest.resultlog import generic_path
try:
from logging import NullHandler
except ImportError:
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
_card_cache = {}
DEFAULT_TRELLO_COMPLETED = ['Done', 'Archived']
def pytest_addoption(parser):
'''Add options to control trello integration.'''
group = parser.getgroup('pytest-trello')
group.addoption('--trello-cfg',
action='store',
dest='trello_cfg_file',
default='trello.yml',
metavar='TRELLO_CFG',
help='Trello configuration file (default: %default)')
group.addoption('--trello-api-key',
action='store',
dest='trello_api_key',
default=None,
metavar='TRELLO_API_KEY',
help='Trello API key (defaults to value supplied in TRELLO_CFG)')
group.addoption('--trello-api-token',
action='store',
dest='trello_api_token',
metavar='TRELLO_API_TOKEN',
default=None,
help='Trello API token (defaults to value supplied in TRELLO_CFG). Refer to https://trello.com/docs/gettingstarted/')
group.addoption('--trello-completed',
action='append',
dest='trello_completed',
metavar='TRELLO_COMPLETED',
default=[],
help='Any cards in TRELLO_COMPLETED are considered complete (default: %s)' % DEFAULT_TRELLO_COMPLETED)
group.addoption('--show-trello-cards',
action='store_true',
dest='show_trello_cards',
default=False,
help='Show a list of all trello card markers.')
def pytest_configure(config):
'''
Validate --trello-* parameters.
'''
log.debug("pytest_configure() called")
# Add marker
config.addinivalue_line("markers", """trello(*cards): Trello card integration""")
# Sanitize key and token
trello_cfg_file = config.getoption('trello_cfg_file')
trello_api_key = config.getoption('trello_api_key')
trello_api_token = config.getoption('trello_api_token')
trello_completed = config.getoption('trello_completed')
# If not --help or --collectonly or --showfixtures ...
if not (config.option.help or config.option.collectonly or config.option.showfixtures):
# Warn if file does not exist
if not os.path.isfile(trello_cfg_file):
errstr = "No trello configuration file found matching: %s" % trello_cfg_file
log.warning(errstr)
# Load configuration file ...
if os.path.isfile(trello_cfg_file):
trello_cfg = yaml.safe_load(open(trello_cfg_file, 'r'))
try:
trello_cfg = trello_cfg.get('trello', {})
except AttributeError:
trello_cfg = {}
errstr = "No trello configuration found in file: %s" % trello_cfg_file
log.warning(errstr)
if trello_api_key is None:
trello_api_key = trello_cfg.get('key', None)
if trello_api_token is None:
trello_api_token = trello_cfg.get('token', None)
if trello_completed is None or trello_completed == []:
trello_completed = trello_cfg.get('completed', [])
# Initialize trello api connection
api = trello.TrelloApi(trello_api_key, trello_api_token)
# If completed is still empty, load default ...
if trello_completed is None or trello_completed == []:
trello_completed = DEFAULT_TRELLO_COMPLETED
# Register pytest plugin
assert config.pluginmanager.register(
TrelloPytestPlugin(api, completed_lists=trello_completed),
'trello_helper'
)
def pytest_cmdline_main(config):
'''Check the show_trello_cards option and, if set, display the trello card report.'''
log.debug("pytest_cmdline_main() called")
if config.option.show_trello_cards:
from _pytest.main import wrap_session
wrap_session(config, __show_trello_cards)
return 0
def __show_trello_cards(config, session):
'''Generate a report that includes all linked trello cards, and their status.'''
session.perform_collect()
curdir = py.path.local()
trello_helper = config.pluginmanager.getplugin("trello_helper")
card_cache = dict()
for i, item in enumerate(filter(lambda i: i.get_marker("trello") is not None, session.items)):
cards = item.funcargs.get('cards', [])
for card in cards:
if card not in card_cache:
card_cache[card] = list()
card_cache[card].append(generic_path(item))
reporter = config.pluginmanager.getplugin("terminalreporter")
reporter.section("trello card report")
if card_cache:
for card, gpaths in card_cache.items():
reporter.write("{0} ".format(card.url), bold=True)
reporter.write_line("[{0}] {1}".format(card.list.name, card.name))
for gpath in gpaths:
reporter.write_line(" * %s" % gpath)
else:
reporter.write_line("No trello cards collected")
class TrelloCard(object):
'''Object representing a trello card.
'''
def __init__(self, api, url):
self.api = api
self.url = url
self._card = None
@property
def id(self):
return os.path.basename(self.url)
@property
def card(self):
if self._card is None:
try:
self._card = self.api.cards.get(self.id)
except ValueError as e:
log.warning("Failed to retrieve card:%s - %s" % (self.id, e))
pass
return self._card
@property
def name(self):
return self.card['name']
@property
def idList(self):
return self.card['idList']
@property
def list(self):
return TrelloList(self.api, self.idList)
class TrelloList(object):
'''Object representing a trello list.
'''
def __init__(self, api, id):
self.api = api
self.id = id
self._list = None
@property
def name(self):
if self._list is None:
try:
self._list = self.api.lists.get(self.id)
except ValueError as e:
log.warning("Failed to retrieve list:%s - %s" % (self.id, e))
pass
return self._list['name']
class TrelloCardList(object):
'''Object representing a list of trello cards.'''
def __init__(self, api, *cards, **kwargs):
self.api = api
self.cards = cards
self.xfail = kwargs.get('xfail', True) and not ('skip' in kwargs)
def __iter__(self):
for card in self.cards:
if card not in _card_cache:
_card_cache[card] = TrelloCard(self.api, card)
yield _card_cache[card]
class TrelloPytestPlugin(object):
def __init__(self, api, **kwargs):
log.debug("TrelloPytestPlugin initialized")
self.api = api
self.completed_lists = kwargs.get('completed_lists', [])
def pytest_runtest_setup(self, item):
log.debug("pytest_runtest_setup() called")
if 'trello' not in item.keywords:
return
incomplete_cards = []
cards = item.funcargs["cards"]
for card in cards:
try:
if card.list.name not in self.completed_lists:
incomplete_cards.append(card)
except requests.exceptions.HTTPError as e:
log.warning("Error accessing card:%s - %s" % (card.id, e))
continue
# item.get_marker('trello').kwargs
if incomplete_cards:
if cards.xfail:
item.add_marker(pytest.mark.xfail(
reason="Xfailing due to incomplete trello cards: \n{0}".format(
"\n ".join(["{0} [{1}] {2}".format(card.url, card.list.name, card.name) for card in incomplete_cards]))))
else:
pytest.skip("Skipping due to incomplete trello cards:\n{0}".format(
"\n ".join(["{0} [{1}] {2}".format(card.url, card.list.name, card.name) for card in incomplete_cards])))
def pytest_collection_modifyitems(self, session, config, items):
log.debug("pytest_collection_modifyitems() called")
reporter = config.pluginmanager.getplugin("terminalreporter")
reporter.write("collected", bold=True)
for i, item in enumerate(filter(lambda i: i.get_marker("trello") is not None, items)):
marker = item.get_marker('trello')
cards = tuple(sorted(set(marker.args))) # (O_O) for caching
for card in cards:
if card not in _card_cache:
_card_cache[card] = TrelloCard(self.api, card)
item.funcargs["cards"] = TrelloCardList(self.api, *cards, **marker.kwargs)
reporter.write(" {0} trello markers\n".format(len(_card_cache)), bold=True)
|
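# Illustrative usage sketch for the plugin above. A trello.yml supplies the
# API credentials, and tests reference card URLs via the trello marker. The
# key, token and card URL below are placeholders, not real values.
#
# trello.yml:
#     trello:
#         key: '<trello-api-key>'
#         token: '<trello-api-token>'
#         completed:
#             - 'Done'
#             - 'Archived'
#
# test_example.py:
import pytest


@pytest.mark.trello('https://trello.com/c/xxxxxxxx')
def test_blocked_by_card():
    # xfails while the referenced card is not yet in a completed list
    assert True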
|
from unittest import skipIf, skipUnless
from django.contrib.gis.db.models import fields
from django.contrib.gis.geos import MultiPolygon, Polygon
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, migrations, models
from django.db.migrations.migration import Migration
from django.db.migrations.state import ProjectState
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from ..utils import mysql, oracle, spatialite
try:
GeometryColumns = connection.ops.geometry_columns()
HAS_GEOMETRY_COLUMNS = True
except NotImplementedError:
HAS_GEOMETRY_COLUMNS = False
class OperationTestCase(TransactionTestCase):
available_apps = ['gis_tests.gis_migrations']
def tearDown(self):
# Delete table after testing
if hasattr(self, 'current_state'):
self.apply_operations('gis', self.current_state, [migrations.DeleteModel('Neighborhood')])
super().tearDown()
@property
def has_spatial_indexes(self):
if mysql:
with connection.cursor() as cursor:
return connection.introspection.supports_spatial_index(cursor, 'gis_neighborhood')
return True
def get_table_description(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_table_description(cursor, table)
def assertColumnExists(self, table, column):
self.assertIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNotExists(self, table, column):
self.assertNotIn(column, [c.name for c in self.get_table_description(table)])
def apply_operations(self, app_label, project_state, operations):
migration = Migration('name', app_label)
migration.operations = operations
with connection.schema_editor() as editor:
return migration.apply(project_state, editor)
def set_up_test_model(self, force_raster_creation=False):
test_fields = [
('id', models.AutoField(primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('geom', fields.MultiPolygonField(srid=4326))
]
if connection.features.supports_raster or force_raster_creation:
test_fields += [('rast', fields.RasterField(srid=4326, null=True))]
operations = [migrations.CreateModel('Neighborhood', test_fields)]
self.current_state = self.apply_operations('gis', ProjectState(), operations)
def assertGeometryColumnsCount(self, expected_count):
self.assertEqual(
GeometryColumns.objects.filter(**{
'%s__iexact' % GeometryColumns.table_name_col(): 'gis_neighborhood',
}).count(),
expected_count
)
def assertSpatialIndexExists(self, table, column, raster=False):
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
if raster:
self.assertTrue(any(
'st_convexhull(%s)' % column in c['definition']
for c in constraints.values()
if c['definition'] is not None
))
else:
self.assertIn([column], [c['columns'] for c in constraints.values()])
def alter_gis_model(self, migration_class, model_name, field_name,
blank=False, field_class=None, field_class_kwargs=None):
args = [model_name, field_name]
if field_class:
field_class_kwargs = field_class_kwargs or {'srid': 4326, 'blank': blank}
args.append(field_class(**field_class_kwargs))
operation = migration_class(*args)
old_state = self.current_state.clone()
operation.state_forwards('gis', self.current_state)
with connection.schema_editor() as editor:
operation.database_forwards('gis', editor, old_state, self.current_state)
class OperationTests(OperationTestCase):
def setUp(self):
super().setUp()
self.set_up_test_model()
def test_add_geom_field(self):
"""
Test the AddField operation with a geometry-enabled column.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'path', False, fields.LineStringField)
self.assertColumnExists('gis_neighborhood', 'path')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(2)
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'path')
@skipUnless(HAS_GEOMETRY_COLUMNS, "Backend doesn't support GeometryColumns.")
def test_geom_col_name(self):
self.assertEqual(
GeometryColumns.geom_col_name(),
'column_name' if oracle else 'f_geometry_column',
)
@skipUnlessDBFeature('supports_raster')
def test_add_raster_field(self):
"""
Test the AddField operation with a raster-enabled column.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'heatmap', False, fields.RasterField)
self.assertColumnExists('gis_neighborhood', 'heatmap')
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'heatmap', raster=True)
def test_add_blank_geom_field(self):
"""
Should be able to add a GeometryField with blank=True.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'path', True, fields.LineStringField)
self.assertColumnExists('gis_neighborhood', 'path')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(2)
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'path')
@skipUnlessDBFeature('supports_raster')
def test_add_blank_raster_field(self):
"""
Should be able to add a RasterField with blank=True.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'heatmap', True, fields.RasterField)
self.assertColumnExists('gis_neighborhood', 'heatmap')
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'heatmap', raster=True)
def test_remove_geom_field(self):
"""
Test the RemoveField operation with a geometry-enabled column.
"""
self.alter_gis_model(migrations.RemoveField, 'Neighborhood', 'geom')
self.assertColumnNotExists('gis_neighborhood', 'geom')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(0)
@skipUnlessDBFeature('supports_raster')
def test_remove_raster_field(self):
"""
Test the RemoveField operation with a raster-enabled column.
"""
self.alter_gis_model(migrations.RemoveField, 'Neighborhood', 'rast')
self.assertColumnNotExists('gis_neighborhood', 'rast')
def test_create_model_spatial_index(self):
if not self.has_spatial_indexes:
self.skipTest('No support for Spatial indexes')
self.assertSpatialIndexExists('gis_neighborhood', 'geom')
if connection.features.supports_raster:
self.assertSpatialIndexExists('gis_neighborhood', 'rast', raster=True)
@skipUnlessDBFeature("supports_3d_storage")
@skipIf(spatialite, "Django currently doesn't support altering Spatialite geometry fields")
def test_alter_geom_field_dim(self):
Neighborhood = self.current_state.apps.get_model('gis', 'Neighborhood')
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
Neighborhood.objects.create(name='TestDim', geom=MultiPolygon(p1, p1))
# Add 3rd dimension.
self.alter_gis_model(
migrations.AlterField, 'Neighborhood', 'geom', False,
fields.MultiPolygonField, field_class_kwargs={'srid': 4326, 'dim': 3}
)
self.assertTrue(Neighborhood.objects.first().geom.hasz)
# Rewind to 2 dimensions.
self.alter_gis_model(
migrations.AlterField, 'Neighborhood', 'geom', False,
fields.MultiPolygonField, field_class_kwargs={'srid': 4326, 'dim': 2}
)
self.assertFalse(Neighborhood.objects.first().geom.hasz)
@skipIfDBFeature('supports_raster')
class NoRasterSupportTests(OperationTestCase):
def test_create_raster_model_on_db_without_raster_support(self):
msg = 'Raster fields require backends with raster support.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.set_up_test_model(force_raster_creation=True)
def test_add_raster_field_on_db_without_raster_support(self):
msg = 'Raster fields require backends with raster support.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.set_up_test_model()
self.alter_gis_model(
migrations.AddField, 'Neighborhood', 'heatmap',
False, fields.RasterField
)
|
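# Illustrative sketch of the kind of operation the tests below apply: a
# migration that adds a geometry column to an existing model (the app label,
# model name and dependency below are hypothetical).
from django.contrib.gis.db.models import fields
from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [('gis', '0001_initial')]
    operations = [
        migrations.AddField(
            model_name='neighborhood',
            name='path',
            field=fields.LineStringField(srid=4326, null=True, blank=True),
        ),
    ]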
|
import ctypes
import inspect
import os
import subprocess
import sys
import tempfile
import numpy as np
from numba.core.typing.templates import AbstractTemplate, ConcreteTemplate
from numba.core import (types, typing, utils, funcdesc, serialize, config,
compiler, sigutils)
from numba.core.compiler_lock import global_compiler_lock
import numba
from .cudadrv.devices import get_context
from .cudadrv import nvvm, driver
from .errors import missing_launch_config_msg, normalize_kernel_dimensions
from .api import get_current_device
from .args import wrap_arg
@global_compiler_lock
def compile_cuda(pyfunc, return_type, args, debug=False, inline=False):
# First compilation will trigger the initialization of the CUDA backend.
from .descriptor import CUDATargetDesc
typingctx = CUDATargetDesc.typingctx
targetctx = CUDATargetDesc.targetctx
# TODO handle debug flag
flags = compiler.Flags()
# Do not compile (generate native code), just lower (to LLVM)
flags.set('no_compile')
flags.set('no_cpython_wrapper')
flags.set('no_cfunc_wrapper')
if debug:
flags.set('debuginfo')
if inline:
flags.set('forceinline')
# Run compilation pipeline
cres = compiler.compile_extra(typingctx=typingctx,
targetctx=targetctx,
func=pyfunc,
args=args,
return_type=return_type,
flags=flags,
locals={})
library = cres.library
library.finalize()
return cres
@global_compiler_lock
def compile_kernel(pyfunc, args, link, debug=False, inline=False,
fastmath=False, extensions=[], max_registers=None, opt=True):
cres = compile_cuda(pyfunc, types.void, args, debug=debug, inline=inline)
fname = cres.fndesc.llvm_func_name
lib, kernel = cres.target_context.prepare_cuda_kernel(cres.library, fname,
cres.signature.args,
debug=debug)
cukern = _Kernel(llvm_module=lib._final_module,
name=kernel.name,
pretty_name=cres.fndesc.qualname,
argtypes=cres.signature.args,
type_annotation=cres.type_annotation,
link=link,
debug=debug,
opt=opt,
call_helper=cres.call_helper,
fastmath=fastmath,
extensions=extensions,
max_registers=max_registers)
return cukern
@global_compiler_lock
def compile_ptx(pyfunc, args, debug=False, device=False, fastmath=False,
cc=None, opt=True):
"""Compile a Python function to PTX for a given set of argument types.
:param pyfunc: The Python function to compile.
:param args: A tuple of argument types to compile for.
:param debug: Whether to include debug info in the generated PTX.
:type debug: bool
:param device: Whether to compile a device function. Defaults to ``False``,
to compile global kernel functions.
:type device: bool
:param fastmath: Whether to enable fast math flags (ftz=1, prec_sqrt=0,
prec_div=0, and fma=1)
:type fastmath: bool
:param cc: Compute capability to compile for, as a tuple ``(MAJOR, MINOR)``.
Defaults to ``(5, 2)``.
:type cc: tuple
:param opt: Enable optimizations. Defaults to ``True``.
:type opt: bool
:return: (ptx, resty): The PTX code and inferred return type
:rtype: tuple
"""
cres = compile_cuda(pyfunc, None, args, debug=debug)
resty = cres.signature.return_type
if device:
llvm_module = cres.library._final_module
nvvm.fix_data_layout(llvm_module)
else:
fname = cres.fndesc.llvm_func_name
tgt = cres.target_context
lib, kernel = tgt.prepare_cuda_kernel(cres.library, fname,
cres.signature.args, debug=debug)
llvm_module = lib._final_module
options = {
'debug': debug,
'fastmath': fastmath,
}
cc = cc or config.CUDA_DEFAULT_PTX_CC
opt = 3 if opt else 0
arch = nvvm.get_arch_option(*cc)
llvmir = str(llvm_module)
ptx = nvvm.llvm_to_ptx(llvmir, opt=opt, arch=arch, **options)
return ptx.decode('utf-8'), resty
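# Usage sketch for compile_ptx (kept as a comment so the module body is
# unchanged; assumes the CUDA toolkit and drivers required by numba.cuda):
#
#     from numba.core import types
#     from numba.cuda.compiler import compile_ptx
#
#     def axpy(r, a, x, y):
#         r[0] = a * x[0] + y[0]
#
#     args = (types.float32[:], types.float32,
#             types.float32[:], types.float32[:])
#     ptx, resty = compile_ptx(axpy, args, cc=(7, 0))
#     # resty is the inferred return type (types.none for kernels);
#     # ptx is the generated PTX source as a str.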
def compile_ptx_for_current_device(pyfunc, args, debug=False, device=False,
fastmath=False, opt=True):
"""Compile a Python function to PTX for a given set of argument types for
the current device's compute capability. This calls :func:`compile_ptx`
with an appropriate ``cc`` value for the current device."""
cc = get_current_device().compute_capability
return compile_ptx(pyfunc, args, debug=debug, device=device,
fastmath=fastmath, cc=cc, opt=opt)
def disassemble_cubin(cubin):
# nvdisasm only accepts input from a file, so we need to write out to a
# temp file and clean up afterwards.
fd = None
fname = None
try:
fd, fname = tempfile.mkstemp()
with open(fname, 'wb') as f:
f.write(cubin)
try:
cp = subprocess.run(['nvdisasm', fname], check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except FileNotFoundError as e:
# Re-raise unexpected FileNotFoundErrors so `cp` is never left unbound.
if e.filename != 'nvdisasm':
raise
msg = ("nvdisasm is required for SASS inspection, and has not "
"been found.\n\nYou may need to install the CUDA "
"toolkit and ensure that it is available on your "
"PATH.\n")
raise RuntimeError(msg) from e
return cp.stdout.decode('utf-8')
finally:
if fd is not None:
os.close(fd)
if fname is not None:
os.unlink(fname)
class DeviceFunctionTemplate(serialize.ReduceMixin):
"""Unmaterialized device function
"""
def __init__(self, pyfunc, debug, inline, opt):
self.py_func = pyfunc
self.debug = debug
self.inline = inline
self.opt = opt
self._compileinfos = {}
name = getattr(pyfunc, '__name__', 'unknown')
self.__name__ = f"{name} <CUDA device function>".format(name)
def _reduce_states(self):
return dict(py_func=self.py_func, debug=self.debug, inline=self.inline)
@classmethod
def _rebuild(cls, py_func, debug, inline):
return compile_device_template(py_func, debug=debug, inline=inline)
def compile(self, args):
"""Compile the function for the given argument types.
Each signature is compiled once by caching the compiled function inside
this object.
Returns the `CompileResult`.
"""
if args not in self._compileinfos:
cres = compile_cuda(self.py_func, None, args, debug=self.debug,
inline=self.inline)
first_definition = not self._compileinfos
self._compileinfos[args] = cres
libs = [cres.library]
if first_definition:
# First definition
cres.target_context.insert_user_function(self, cres.fndesc,
libs)
else:
cres.target_context.add_user_function(self, cres.fndesc, libs)
else:
cres = self._compileinfos[args]
return cres
def inspect_llvm(self, args):
"""Returns the LLVM-IR text compiled for *args*.
Parameters
----------
args: tuple[Type]
Argument types.
Returns
-------
llvmir : str
"""
# Force a compilation to occur if none has yet - this can be needed if
# the user attempts to inspect LLVM IR or PTX before the function has
# been called for the given arguments from a jitted kernel.
self.compile(args)
cres = self._compileinfos[args]
mod = cres.library._final_module
return str(mod)
def inspect_ptx(self, args, nvvm_options={}):
"""Returns the PTX compiled for *args* for the currently active GPU
Parameters
----------
args: tuple[Type]
Argument types.
nvvm_options : dict; optional
See `CompilationUnit.compile` in `numba/cuda/cudadrv/nvvm.py`.
Returns
-------
ptx : bytes
"""
llvmir = self.inspect_llvm(args)
# Make PTX
cuctx = get_context()
device = cuctx.device
cc = device.compute_capability
arch = nvvm.get_arch_option(*cc)
opt = 3 if self.opt else 0
ptx = nvvm.llvm_to_ptx(llvmir, opt=opt, arch=arch, **nvvm_options)
return ptx
def compile_device_template(pyfunc, debug=False, inline=False, opt=True):
"""Create a DeviceFunctionTemplate object and register the object to
the CUDA typing context.
"""
from .descriptor import CUDATargetDesc
dft = DeviceFunctionTemplate(pyfunc, debug=debug, inline=inline, opt=opt)
class device_function_template(AbstractTemplate):
key = dft
def generic(self, args, kws):
assert not kws
return dft.compile(args).signature
def get_template_info(cls):
basepath = os.path.dirname(os.path.dirname(numba.__file__))
code, firstlineno = inspect.getsourcelines(pyfunc)
path = inspect.getsourcefile(pyfunc)
sig = str(utils.pysignature(pyfunc))
info = {
'kind': "overload",
'name': getattr(cls.key, '__name__', "unknown"),
'sig': sig,
'filename': utils.safe_relpath(path, start=basepath),
'lines': (firstlineno, firstlineno + len(code) - 1),
'docstring': pyfunc.__doc__
}
return info
typingctx = CUDATargetDesc.typingctx
typingctx.insert_user_function(dft, device_function_template)
return dft
def compile_device(pyfunc, return_type, args, inline=True, debug=False):
return DeviceFunction(pyfunc, return_type, args, inline=inline, debug=debug)
def declare_device_function(name, restype, argtypes):
from .descriptor import CUDATargetDesc
typingctx = CUDATargetDesc.typingctx
targetctx = CUDATargetDesc.targetctx
sig = typing.signature(restype, *argtypes)
extfn = ExternFunction(name, sig)
class device_function_template(ConcreteTemplate):
key = extfn
cases = [sig]
fndesc = funcdesc.ExternalFunctionDescriptor(
name=name, restype=restype, argtypes=argtypes)
typingctx.insert_user_function(extfn, device_function_template)
targetctx.insert_user_function(extfn, fndesc)
return extfn
class DeviceFunction(serialize.ReduceMixin):
def __init__(self, pyfunc, return_type, args, inline, debug):
self.py_func = pyfunc
self.return_type = return_type
self.args = args
self.inline = inline
self.debug = debug
cres = compile_cuda(self.py_func, self.return_type, self.args,
debug=self.debug, inline=self.inline)
self.cres = cres
class device_function_template(ConcreteTemplate):
key = self
cases = [cres.signature]
cres.typing_context.insert_user_function(
self, device_function_template)
cres.target_context.insert_user_function(self, cres.fndesc,
[cres.library])
def _reduce_states(self):
return dict(py_func=self.py_func, return_type=self.return_type,
args=self.args, inline=self.inline, debug=self.debug)
@classmethod
def _rebuild(cls, py_func, return_type, args, inline, debug):
return cls(py_func, return_type, args, inline, debug)
def __repr__(self):
fmt = "<DeviceFunction py_func={0} signature={1}>"
return fmt.format(self.py_func, self.cres.signature)
class ExternFunction(object):
def __init__(self, name, sig):
self.name = name
self.sig = sig
class ForAll(object):
def __init__(self, kernel, ntasks, tpb, stream, sharedmem):
if ntasks < 0:
raise ValueError("Can't create ForAll with negative task count: %s"
% ntasks)
self.kernel = kernel
self.ntasks = ntasks
self.thread_per_block = tpb
self.stream = stream
self.sharedmem = sharedmem
def __call__(self, *args):
if self.ntasks == 0:
return
if self.kernel.specialized:
kernel = self.kernel
else:
kernel = self.kernel.specialize(*args)
blockdim = self._compute_thread_per_block(kernel)
griddim = (self.ntasks + blockdim - 1) // blockdim
return kernel[griddim, blockdim, self.stream, self.sharedmem](*args)
def _compute_thread_per_block(self, kernel):
tpb = self.thread_per_block
# Prefer user-specified config
if tpb != 0:
return tpb
# Else, ask the driver to give a good config
else:
ctx = get_context()
kwargs = dict(
func=kernel._func.get(),
b2d_func=0, # dynamic-shared memory is constant to blksz
memsize=self.sharedmem,
blocksizelimit=1024,
)
_, tpb = ctx.get_max_potential_block_size(**kwargs)
return tpb
class CachedPTX(object):
"""A PTX cache that uses compute capability as a cache key
"""
def __init__(self, name, llvmir, options):
self.name = name
self.llvmir = llvmir
self.cache = {}
self._extra_options = options.copy()
def get(self):
"""
Get PTX for the current active context.
"""
cuctx = get_context()
device = cuctx.device
cc = device.compute_capability
ptx = self.cache.get(cc)
if ptx is None:
arch = nvvm.get_arch_option(*cc)
ptx = nvvm.llvm_to_ptx(self.llvmir, arch=arch,
**self._extra_options)
self.cache[cc] = ptx
if config.DUMP_ASSEMBLY:
print(("ASSEMBLY %s" % self.name).center(80, '-'))
print(ptx.decode('utf-8'))
print('=' * 80)
return ptx
class CachedCUFunction(serialize.ReduceMixin):
"""
Get or compile CUDA function for the current active context
Uses device ID as key for cache.
"""
def __init__(self, entry_name, ptx, linking, max_registers):
self.entry_name = entry_name
self.ptx = ptx
self.linking = linking
self.cache = {}
self.ccinfos = {}
self.cubins = {}
self.max_registers = max_registers
def get(self):
cuctx = get_context()
device = cuctx.device
cufunc = self.cache.get(device.id)
if cufunc is None:
ptx = self.ptx.get()
# Link
linker = driver.Linker(max_registers=self.max_registers)
linker.add_ptx(ptx)
for path in self.linking:
linker.add_file_guess_ext(path)
cubin, size = linker.complete()
compile_info = linker.info_log
module = cuctx.create_module_image(cubin)
# Load
cufunc = module.get_function(self.entry_name)
# Populate caches
self.cache[device.id] = cufunc
self.ccinfos[device.id] = compile_info
# We take a copy of the cubin because it's owned by the linker
cubin_ptr = ctypes.cast(cubin, ctypes.POINTER(ctypes.c_char))
cubin_data = np.ctypeslib.as_array(cubin_ptr, shape=(size,)).copy()
self.cubins[device.id] = cubin_data
return cufunc
def get_sass(self):
self.get() # trigger compilation
device = get_context().device
return disassemble_cubin(self.cubins[device.id])
def get_info(self):
self.get() # trigger compilation
cuctx = get_context()
device = cuctx.device
ci = self.ccinfos[device.id]
return ci
def _reduce_states(self):
"""
Reduce the instance for serialization.
Pre-compiled PTX code string is serialized inside the `ptx` (CachedPTX).
Loaded CUfunctions are discarded. They are recreated when unserialized.
"""
if self.linking:
msg = ('cannot pickle CUDA kernel function with additional '
'libraries to link against')
raise RuntimeError(msg)
return dict(entry_name=self.entry_name, ptx=self.ptx,
linking=self.linking, max_registers=self.max_registers)
@classmethod
def _rebuild(cls, entry_name, ptx, linking, max_registers):
"""
Rebuild an instance.
"""
return cls(entry_name, ptx, linking, max_registers)
class _Kernel(serialize.ReduceMixin):
'''
CUDA Kernel specialized for a given set of argument types. When called, this
object launches the kernel on the device.
'''
def __init__(self, llvm_module, name, pretty_name, argtypes, call_helper,
link=(), debug=False, fastmath=False, type_annotation=None,
extensions=[], max_registers=None, opt=True):
super().__init__()
# initialize CUfunction
options = {
'debug': debug,
'fastmath': fastmath,
'opt': 3 if opt else 0
}
ptx = CachedPTX(pretty_name, str(llvm_module), options=options)
cufunc = CachedCUFunction(name, ptx, link, max_registers)
# populate members
self.entry_name = name
self.argument_types = tuple(argtypes)
self.linking = tuple(link)
self._type_annotation = type_annotation
self._func = cufunc
self.debug = debug
self.call_helper = call_helper
self.extensions = list(extensions)
@classmethod
def _rebuild(cls, name, argtypes, cufunc, link, debug, call_helper,
extensions):
"""
Rebuild an instance.
"""
instance = cls.__new__(cls)
# invoke parent constructor
super(cls, instance).__init__()
# populate members
instance.entry_name = name
instance.argument_types = tuple(argtypes)
instance.linking = tuple(link)
instance._type_annotation = None
instance._func = cufunc
instance.debug = debug
instance.call_helper = call_helper
instance.extensions = extensions
return instance
def _reduce_states(self):
"""
Reduce the instance for serialization.
Compiled definitions are serialized in PTX form.
Type annotation are discarded.
Thread, block and shared memory configuration are serialized.
Stream information is discarded.
"""
return dict(name=self.entry_name, argtypes=self.argument_types,
cufunc=self._func, link=self.linking, debug=self.debug,
call_helper=self.call_helper, extensions=self.extensions)
def __call__(self, *args, **kwargs):
assert not kwargs
griddim, blockdim = normalize_kernel_dimensions(self.griddim,
self.blockdim)
self._kernel_call(args=args,
griddim=griddim,
blockdim=blockdim,
stream=self.stream,
sharedmem=self.sharedmem)
def bind(self):
"""
Force binding to current CUDA context
"""
self._func.get()
@property
def ptx(self):
'''
PTX code for this kernel.
'''
return self._func.ptx.get().decode('utf8')
@property
def device(self):
"""
Get current active context
"""
return get_current_device()
def inspect_llvm(self):
'''
Returns the LLVM IR for this kernel.
'''
return str(self._func.ptx.llvmir)
def inspect_asm(self):
'''
Returns the PTX code for this kernel.
'''
return self._func.ptx.get().decode('ascii')
def inspect_sass(self):
'''
Returns the SASS code for this kernel.
Requires nvdisasm to be available on the PATH.
'''
return self._func.get_sass()
def inspect_types(self, file=None):
'''
Produce a dump of the Python source of this function annotated with the
corresponding Numba IR and type information. The dump is written to
*file*, or *sys.stdout* if *file* is *None*.
'''
if self._type_annotation is None:
raise ValueError("Type annotation is not available")
if file is None:
file = sys.stdout
print("%s %s" % (self.entry_name, self.argument_types), file=file)
print('-' * 80, file=file)
print(self._type_annotation, file=file)
print('=' * 80, file=file)
def launch(self, args, griddim, blockdim, stream=0, sharedmem=0):
# Prepare kernel
cufunc = self._func.get()
if self.debug:
excname = cufunc.name + "__errcode__"
excmem, excsz = cufunc.module.get_global_symbol(excname)
assert excsz == ctypes.sizeof(ctypes.c_int)
excval = ctypes.c_int()
excmem.memset(0, stream=stream)
# Prepare arguments
retr = [] # hold functors for writeback
kernelargs = []
for t, v in zip(self.argument_types, args):
self._prepare_args(t, v, stream, retr, kernelargs)
# Configure kernel
cu_func = cufunc.configure(griddim, blockdim,
stream=stream,
sharedmem=sharedmem)
# Invoke kernel
cu_func(*kernelargs)
if self.debug:
driver.device_to_host(ctypes.addressof(excval), excmem, excsz)
if excval.value != 0:
# An error occurred
def load_symbol(name):
mem, sz = cufunc.module.get_global_symbol("%s__%s__" %
(cufunc.name,
name))
val = ctypes.c_int()
driver.device_to_host(ctypes.addressof(val), mem, sz)
return val.value
tid = [load_symbol("tid" + i) for i in 'zyx']
ctaid = [load_symbol("ctaid" + i) for i in 'zyx']
code = excval.value
exccls, exc_args, loc = self.call_helper.get_exception(code)
# Prefix the exception message with the source location
if loc is None:
locinfo = ''
else:
sym, filepath, lineno = loc
filepath = os.path.abspath(filepath)
locinfo = 'In function %r, file %s, line %s, ' % (sym,
filepath,
lineno,)
# Prefix the exception message with the thread position
prefix = "%stid=%s ctaid=%s" % (locinfo, tid, ctaid)
if exc_args:
exc_args = ("%s: %s" % (prefix, exc_args[0]),) + \
exc_args[1:]
else:
exc_args = prefix,
raise exccls(*exc_args)
# retrieve auto converted arrays
for wb in retr:
wb()
def _prepare_args(self, ty, val, stream, retr, kernelargs):
"""
Convert arguments to ctypes and append to kernelargs
"""
# map the arguments using any extension you've registered
for extension in reversed(self.extensions):
ty, val = extension.prepare_args(
ty,
val,
stream=stream,
retr=retr)
if isinstance(ty, types.Array):
devary = wrap_arg(val).to_device(retr, stream)
c_intp = ctypes.c_ssize_t
meminfo = ctypes.c_void_p(0)
parent = ctypes.c_void_p(0)
nitems = c_intp(devary.size)
itemsize = c_intp(devary.dtype.itemsize)
data = ctypes.c_void_p(driver.device_pointer(devary))
kernelargs.append(meminfo)
kernelargs.append(parent)
kernelargs.append(nitems)
kernelargs.append(itemsize)
kernelargs.append(data)
for ax in range(devary.ndim):
kernelargs.append(c_intp(devary.shape[ax]))
for ax in range(devary.ndim):
kernelargs.append(c_intp(devary.strides[ax]))
elif isinstance(ty, types.Integer):
cval = getattr(ctypes, "c_%s" % ty)(val)
kernelargs.append(cval)
elif ty == types.float64:
cval = ctypes.c_double(val)
kernelargs.append(cval)
elif ty == types.float32:
cval = ctypes.c_float(val)
kernelargs.append(cval)
elif ty == types.boolean:
cval = ctypes.c_uint8(int(val))
kernelargs.append(cval)
elif ty == types.complex64:
kernelargs.append(ctypes.c_float(val.real))
kernelargs.append(ctypes.c_float(val.imag))
elif ty == types.complex128:
kernelargs.append(ctypes.c_double(val.real))
kernelargs.append(ctypes.c_double(val.imag))
elif isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
kernelargs.append(ctypes.c_int64(val.view(np.int64)))
elif isinstance(ty, types.Record):
devrec = wrap_arg(val).to_device(retr, stream)
kernelargs.append(devrec)
else:
raise NotImplementedError(ty, val)
class _KernelConfiguration:
def __init__(self, dispatcher, griddim, blockdim, stream, sharedmem):
self.dispatcher = dispatcher
self.griddim = griddim
self.blockdim = blockdim
self.stream = stream
self.sharedmem = sharedmem
def __call__(self, *args):
return self.dispatcher.call(args, self.griddim, self.blockdim,
self.stream, self.sharedmem)
class Dispatcher(serialize.ReduceMixin):
'''
CUDA Dispatcher object. When configured and called, the dispatcher will
specialize itself for the given arguments (if no suitable specialized
version already exists) & compute capability, and launch on the device
associated with the current context.
Dispatcher objects are not to be constructed by the user, but instead are
created using the :func:`numba.cuda.jit` decorator.
'''
def __init__(self, func, sigs, bind, targetoptions):
super().__init__()
self.py_func = func
self.sigs = []
self._bind = bind
self.link = targetoptions.pop('link', ())
self._can_compile = True
# keyed by a `(compute capability, args)` tuple
self.definitions = {}
self.specializations = {}
self.targetoptions = targetoptions
# defensive copy
self.targetoptions['extensions'] = \
list(self.targetoptions.get('extensions', []))
from .descriptor import CUDATargetDesc
self.typingctx = CUDATargetDesc.typingctx
if sigs:
if len(sigs) > 1:
raise TypeError("Only one signature supported at present")
self.compile(sigs[0])
self._can_compile = False
def configure(self, griddim, blockdim, stream=0, sharedmem=0):
griddim, blockdim = normalize_kernel_dimensions(griddim, blockdim)
return _KernelConfiguration(self, griddim, blockdim, stream, sharedmem)
def __getitem__(self, args):
if len(args) not in [2, 3, 4]:
raise ValueError('must specify at least the griddim and blockdim')
return self.configure(*args)
def forall(self, ntasks, tpb=0, stream=0, sharedmem=0):
"""Returns a 1D-configured kernel for a given number of tasks.
This assumes that:
- the kernel maps the Global Thread ID ``cuda.grid(1)`` to tasks on a
1-1 basis.
- the kernel checks that the Global Thread ID is upper-bounded by
``ntasks``, and does nothing if it is not.
:param ntasks: The number of tasks.
:param tpb: The size of a block. An appropriate value is chosen if this
parameter is not supplied.
:param stream: The stream on which the configured kernel will be
launched.
:param sharedmem: The number of bytes of dynamic shared memory required
by the kernel.
:return: A configured kernel, ready to launch on a set of arguments."""
return ForAll(self, ntasks, tpb=tpb, stream=stream, sharedmem=sharedmem)
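    # Usage sketch for forall (comment only; assumes a CUDA-capable device).
    # A kernel that guards on the task count can be launched without an
    # explicit launch configuration:
    #
    #     @cuda.jit
    #     def inc(arr, n):
    #         i = cuda.grid(1)
    #         if i < n:
    #             arr[i] += 1
    #
    #     darr = cuda.to_device(np.zeros(1024, dtype=np.float32))
    #     inc.forall(darr.size)(darr, darr.size)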
@property
def extensions(self):
'''
A list of objects that must have a `prepare_args` function. When a
specialized kernel is called, each argument will be passed through
to the `prepare_args` (from the last object in this list to the
first). The arguments to `prepare_args` are:
- `ty` the numba type of the argument
- `val` the argument value itself
- `stream` the CUDA stream used for the current call to the kernel
- `retr` a list of zero-arg functions that you may want to append
post-call cleanup work to.
The `prepare_args` function must return a tuple `(ty, val)`, which
will be passed in turn to the next right-most `extension`. After all
the extensions have been called, the resulting `(ty, val)` will be
passed into Numba's default argument marshalling logic.
'''
return self.targetoptions['extensions']
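    # Illustrative sketch of an object satisfying the prepare_args protocol
    # described above (comment only; such an object is supplied through the
    # dispatcher's targetoptions under the 'extensions' key):
    #
    #     class LoggingExtension:
    #         def prepare_args(self, ty, val, stream, retr):
    #             print("kernel arg:", ty, val)
    #             return ty, val  # pass the argument through unchanged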
def __call__(self, *args, **kwargs):
# An attempt to launch an unconfigured kernel
raise ValueError(missing_launch_config_msg)
def call(self, args, griddim, blockdim, stream, sharedmem):
'''
Compile if necessary and invoke this kernel with *args*.
'''
argtypes = tuple(
[self.typingctx.resolve_argument_type(a) for a in args])
kernel = self.compile(argtypes)
kernel.launch(args, griddim, blockdim, stream, sharedmem)
def specialize(self, *args):
'''
Create a new instance of this dispatcher specialized for the given
*args*.
'''
cc = get_current_device().compute_capability
argtypes = tuple(
[self.typingctx.resolve_argument_type(a) for a in args])
if self.specialized:
raise RuntimeError('Dispatcher already specialized')
specialization = self.specializations.get((cc, argtypes))
if specialization:
return specialization
targetoptions = self.targetoptions
targetoptions['link'] = self.link
specialization = Dispatcher(self.py_func, [types.void(*argtypes)],
self._bind, targetoptions)
self.specializations[cc, argtypes] = specialization
return specialization
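    # A hedged usage sketch (names assumed): specializing freezes the
    # dispatcher to concrete argument types, so later launches skip type
    # resolution and no further compilation is allowed.
    #
    #     specialized = kernel.specialize(d_arr)
    #     specialized[blocks_per_grid, threads_per_block](d_arr)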
def disable_compile(self, val=True):
self._can_compile = not val
@property
def specialized(self):
"""
True if the Dispatcher has been specialized.
"""
return len(self.sigs) == 1 and not self._can_compile
@property
def definition(self):
# There is a single definition only when the dispatcher has been
# specialized.
if not self.specialized:
raise ValueError("Dispatcher needs to be specialized to get the "
"single definition")
return next(iter(self.definitions.values()))
@property
def _func(self, signature=None, compute_capability=None):
cc = compute_capability or get_current_device().compute_capability
if signature is not None:
return self.definitions[(cc, signature)]._func
elif self.specialized:
return self.definition._func
else:
return {sig: defn._func for sig, defn in self.definitions.items()}
def compile(self, sig):
'''
Compile and bind to the current context a version of this kernel
specialized for the given signature.
'''
argtypes, return_type = sigutils.normalize_signature(sig)
assert return_type is None or return_type == types.none
cc = get_current_device().compute_capability
if self.specialized:
return self.definition
else:
kernel = self.definitions.get((cc, argtypes))
if kernel is None:
if not self._can_compile:
raise RuntimeError("Compilation disabled")
kernel = compile_kernel(self.py_func, argtypes,
link=self.link,
**self.targetoptions)
self.definitions[(cc, argtypes)] = kernel
if self._bind:
kernel.bind()
self.sigs.append(sig)
return kernel
def inspect_llvm(self, signature=None, compute_capability=None):
'''
Return the LLVM IR for all signatures encountered thus far, or the LLVM
IR for a specific signature and compute_capability if given. If the
dispatcher is specialized, the IR for the single specialization is
returned.
'''
cc = compute_capability or get_current_device().compute_capability
if signature is not None:
return self.definitions[(cc, signature)].inspect_llvm()
elif self.specialized:
return self.definition.inspect_llvm()
else:
return dict((sig, defn.inspect_llvm())
for sig, defn in self.definitions.items())
def inspect_asm(self, signature=None, compute_capability=None):
'''
Return the generated PTX assembly code for all signatures encountered
thus far, or the PTX assembly code for a specific signature and
compute_capability if given. If the dispatcher is specialized, the
assembly code for the single specialization is returned.
'''
cc = compute_capability or get_current_device().compute_capability
if signature is not None:
return self.definitions[(cc, signature)].inspect_asm()
elif self.specialized:
return self.definition.inspect_asm()
else:
return dict((sig, defn.inspect_asm())
for sig, defn in self.definitions.items())
def inspect_sass(self, signature=None, compute_capability=None):
'''
Return the generated SASS code for all signatures encountered thus
far, or the SASS code for a specific signature and compute_capability
if given.
Requires nvdisasm to be available on the PATH.
'''
cc = compute_capability or get_current_device().compute_capability
if signature is not None:
return self.definitions[(cc, signature)].inspect_sass()
elif self.specialized:
return self.definition.inspect_sass()
else:
return dict((sig, defn.inspect_sass())
for sig, defn in self.definitions.items())
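    # A hedged usage sketch of the inspection helpers (the `kernel` name is an
    # assumption). When the dispatcher is not specialized, each call returns a
    # mapping over the compiled definitions; otherwise a single string.
    #
    #     llvm_ir = kernel.inspect_llvm()
    #     ptx = kernel.inspect_asm()
    #     sass = kernel.inspect_sass()   # requires nvdisasm on the PATH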
def inspect_types(self, file=None):
'''
Produce a dump of the Python source of this function annotated with the
corresponding Numba IR and type information. The dump is written to
*file*, or *sys.stdout* if *file* is *None*.
'''
if file is None:
file = sys.stdout
if self.specialized:
self.definition.inspect_types(file=file)
else:
for _, defn in self.definitions.items():
defn.inspect_types(file=file)
@property
def ptx(self):
if self.specialized:
return self.definition.ptx
else:
return dict((sig, defn.ptx)
for sig, defn in self.definitions.items())
def bind(self):
for defn in self.definitions.values():
defn.bind()
@classmethod
def _rebuild(cls, py_func, sigs, bind, targetoptions):
"""
Rebuild an instance.
"""
instance = cls(py_func, sigs, bind, targetoptions)
return instance
def _reduce_states(self):
"""
Reduce the instance for serialization.
Compiled definitions are discarded.
"""
return dict(py_func=self.py_func, sigs=self.sigs, bind=self._bind,
targetoptions=self.targetoptions)
|
|
import unittest
import numpy as np
import math as m
from RULEngine.Util.Pose import Pose
from RULEngine.Util.Position import Position
class TestPose(unittest.TestCase):
def test_init(self):
# Default case
pose = Pose()
self.assertTrue(hasattr(pose, 'position'))
self.assertTrue(hasattr(pose, 'orientation'))
self.assertTrue(type(pose.position) is Position)
self.assertTrue(type(pose.orientation) is float)
# From another Pose
pose1 = Pose(Position(1, 1), 1)
pose2 = Pose(pose1)
self.assertEqual(pose1.position, pose2.position)
self.assertEqual(pose1.orientation, pose2.orientation)
self.assertTrue(pose1 is not pose2)
self.assertTrue(pose1.position is not pose2.position)
self.assertTrue(pose1.orientation is not pose2.orientation)
pose1 = Pose(Position(1, 1), 1 + 2*m.pi)
self.assertAlmostEqual(pose1.orientation, 1)
pose2 = Pose(pose1)
self.assertAlmostEqual(pose2.orientation, 1)
# From a size 3 numpy array
my_array = np.array([1, 1, np.pi/4])
pose = Pose(my_array)
self.assertEqual(pose.position, Position(1, 1))
self.assertEqual(pose.orientation, np.pi/4)
my_array[0] = 2
self.assertNotEqual(pose.position, Position(2, 1))
self.assertTrue(pose.orientation is not my_array[2])
my_array = np.array([1, 1, np.pi/4 + 2*np.pi])
pose = Pose(my_array)
self.assertEqual(pose.position, Position(1, 1))
self.assertAlmostEqual(pose.orientation, np.pi/4)
# From positional arguments (Position(), orientation)
my_position = Position(1, 2)
pose = Pose(my_position, 3)
self.assertEqual(pose.position, Position(1, 2))
self.assertEqual(pose.orientation, 3)
self.assertTrue(pose.position is not my_position)
# From positional arguments (x, y, orientation)
pose = Pose(1, 2, 3)
self.assertEqual(pose.position, Position(1, 2))
self.assertEqual(pose.orientation, 3)
pose = Pose(1, 2, 3 + 2*m.pi)
self.assertAlmostEqual(pose.orientation, 3)
# From a size 2 numpy array and orientation (np.array, orientation)
my_array = np.array([1, 2])
pose = Pose(my_array)
self.assertEqual(pose.position, Position(1, 2))
self.assertEqual(pose.orientation, 0)
my_array[0] = 2
self.assertNotEqual(pose.position, Position(2, 2))
my_array = np.array([1, 2])
pose = Pose(my_array, 3)
self.assertEqual(pose.position, Position(1, 2))
self.assertEqual(pose.orientation, 3)
my_array[0] = 2
self.assertNotEqual(pose.position, Position(2, 2))
my_array = np.array([1, 2])
pose = Pose(my_array, 3 + 2*m.pi)
self.assertAlmostEqual(pose.orientation, 3)
# Error cases
with self.assertRaises(ValueError):
Pose(0)
with self.assertRaises(ValueError):
Pose(0, 0, 0, 0)
with self.assertRaises(ValueError):
Pose({})
with self.assertRaises(ValueError):
Pose([])
with self.assertRaises(ValueError):
Pose([0])
with self.assertRaises(ValueError):
Pose([0], 0)
with self.assertRaises(ValueError):
Pose([0, 0, 0])
with self.assertRaises(ValueError):
Pose([0, 0, 0], 0)
with self.assertRaises(ValueError):
Pose(())
with self.assertRaises(ValueError):
Pose((), 0)
with self.assertRaises(ValueError):
Pose((0, 0, 0))
with self.assertRaises(ValueError):
Pose((0, 0, 0), 0)
with self.assertRaises(ValueError):
Pose(np.zeros(1))
with self.assertRaises(AssertionError):
Pose(np.zeros(1), 0)
with self.assertRaises(ValueError):
Pose(np.zeros(4))
with self.assertRaises(AssertionError):
Pose(np.zeros(4), 0)
def test_get_set(self):
pose = Pose(Position(1, 2), 3)
self.assertTrue(hasattr(pose, 'position'))
self.assertTrue(hasattr(pose, 'orientation'))
self.assertTrue(pose.position == Position(1, 2))
self.assertTrue(pose.orientation == 3)
pose.position = Position(2, 3)
pose.orientation = 1
self.assertTrue(pose.position == Position(2, 3))
self.assertTrue(pose.orientation == 1)
self.assertEqual(pose[0], 2)
self.assertEqual(pose[1], 3)
self.assertEqual(pose[2], 1)
with self.assertRaises(IndexError):
pose[3]
def test_add(self):
pose1 = Pose(Position(1, 2), 3)
pose2 = Pose(Position(5, 7), 9)
pose3 = pose1 + pose2
self.assertEqual(pose3.position, Position(6, 9))
self.assertEqual(pose3.orientation, 12 - 4*m.pi)
pos = Position(10, 20)
pose4 = pose1 + pos
self.assertEqual(pose4.position, Position(11, 22))
self.assertEqual(pose4.orientation, 3)
with self.assertRaises(TypeError):
pos + pose1
def test_sub(self):
pose1 = Pose(Position(1, 2), 3)
pose2 = Pose(Position(5, 7), 9)
pose3 = pose1 - pose2
self.assertEqual(pose3.position, Position(-4, -5))
self.assertEqual(pose3.orientation, -6 + 2*m.pi)
pos = Position(10, 20)
pose4 = pose1 - pos
self.assertEqual(pose4.position, Position(-9, -18))
self.assertEqual(pose4.orientation, 3)
with self.assertRaises(TypeError):
pos - pose1
def test_eq(self):
self.assertEqual(Pose(), Pose())
from RULEngine.Util.Pose import ORIENTATION_ABSOLUTE_TOLERANCE
tol = 0.9999 * ORIENTATION_ABSOLUTE_TOLERANCE
self.assertEqual(Pose(Position(), 1), Pose(Position(), 1 + tol))
self.assertEqual(Pose(Position(), 1), Pose(Position(), 1 - tol))
self.assertNotEqual(Pose(Position(), 1), Pose(Position(), 1 + 1.1*tol))
self.assertNotEqual(Pose(Position(), 1), Pose(Position(), 1 - 1.1*tol))
self.assertEqual(Pose(Position(), 0), Pose(Position(), +tol))
self.assertEqual(Pose(Position(), 0), Pose(Position(), -tol))
self.assertEqual(Pose(Position(), +tol), Pose(Position(), 0))
self.assertEqual(Pose(Position(), -tol), Pose(Position(), 0))
self.assertNotEqual(Pose(Position(), 0), Pose(Position(), +1.1*tol))
self.assertNotEqual(Pose(Position(), 0), Pose(Position(), -1.1*tol))
self.assertNotEqual(Pose(Position(), +1.1*tol), Pose(Position(), 0))
self.assertNotEqual(Pose(Position(), -1.1*tol), Pose(Position(), 0))
self.assertEqual(Pose(Position(), 0), Pose(Position(), 2*m.pi + tol))
self.assertEqual(Pose(Position(), 0), Pose(Position(), 2*m.pi - tol))
self.assertNotEqual(Pose(Position(), 0), Pose(Position(), 2*m.pi + 1.1*tol))
self.assertNotEqual(Pose(Position(), 0), Pose(Position(), 2*m.pi - 1.1*tol))
self.assertEqual(Pose(Position(), m.pi), Pose(Position(), -m.pi))
self.assertEqual(Pose(Position(), m.pi+tol), Pose(Position(), -m.pi))
self.assertEqual(Pose(Position(), m.pi), Pose(Position(), -m.pi+tol))
self.assertEqual(Pose(Position(), m.pi-tol), Pose(Position(), -m.pi))
self.assertEqual(Pose(Position(), m.pi), Pose(Position(), -m.pi - tol))
self.assertNotEqual(Pose(Position(), m.pi+1.1*tol), Pose(Position(), -m.pi))
self.assertNotEqual(Pose(Position(), m.pi), Pose(Position(), -m.pi+1.1*tol))
self.assertNotEqual(Pose(Position(), m.pi-1.1*tol), Pose(Position(), -m.pi))
self.assertNotEqual(Pose(Position(), m.pi), Pose(Position(), -m.pi-1.1*tol))
def test_wrap_to_pi(self):
self.assertEqual(Pose.wrap_to_pi(0), 0)
self.assertEqual(Pose.wrap_to_pi(-1), -1)
self.assertEqual(Pose.wrap_to_pi(1), 1)
self.assertEqual(Pose.wrap_to_pi(m.pi), -m.pi)
self.assertEqual(Pose.wrap_to_pi(-m.pi), -m.pi)
self.assertEqual(Pose.wrap_to_pi(2 * m.pi), 0)
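    # The expectations above are consistent with wrapping angles into the
    # half-open interval [-pi, pi). A minimal sketch (not necessarily the
    # RULEngine implementation):
    #
    #     @staticmethod
    #     def wrap_to_pi(angle):
    #         return (angle + m.pi) % (2 * m.pi) - m.pi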
def test_compare_orientation(self):
# numeric input
pose = Pose(Position(), 0)
self.assertTrue(pose.compare_orientation(0))
tol = 0.01
self.assertTrue(pose.compare_orientation(0.00399, tol))
self.assertTrue(pose.compare_orientation(-0.00399, tol))
pose.orientation = m.pi
self.assertTrue(pose.compare_orientation(m.pi))
self.assertTrue(pose.compare_orientation(-m.pi))
tol = 0.01
self.assertTrue(pose.compare_orientation(m.pi+0.999*tol, tol))
self.assertTrue(pose.compare_orientation(-m.pi-0.999*tol, tol))
self.assertTrue(pose.compare_orientation(m.pi-0.999*tol, tol))
self.assertTrue(pose.compare_orientation(-m.pi+0.999*tol, tol))
self.assertFalse(pose.compare_orientation(m.pi+1.001*tol, tol))
self.assertFalse(pose.compare_orientation(-m.pi-1.001*tol, tol))
self.assertFalse(pose.compare_orientation(m.pi-1.001*tol, tol))
self.assertFalse(pose.compare_orientation(-m.pi+1.001*tol, tol))
pose1 = Pose(Position(1, 1), 0)
pose2 = Pose(Position(10, 10), 0)
self.assertTrue(pose1.compare_orientation(pose2))
def test_to_array(self):
pose = Pose(Position(1, 2), 3)
pose_array = pose.to_array()
self.assertEqual(pose_array[0], 1)
self.assertEqual(pose_array[1], 2)
self.assertEqual(pose_array[2], 3)
self.assertTrue(type(pose_array) is np.ndarray)
def test_to_tuple(self):
uut = Pose()
#sanity check
self.assertNotEqual(type(uut.to_tuple()), type(Pose()))
self.assertEqual(type(uut.to_tuple()), type(tuple()))
self.assertEqual(uut.to_tuple(), tuple((0, 0)))
uut = Pose(Position(557, -778.5), 0)
self.assertEqual(uut.to_tuple(), tuple((557, -778.5)))
self.assertNotEqual(uut.to_tuple(), tuple((-42, 3897)))
|
|
# ________________________________________________________________________
#
# Copyright (C) 2014 Andrew Fullford
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ________________________________________________________________________
#
import sys, os, time, errno, select, logging
from . import utils
from .utils import ses
# These values are used internally to select watch mode.
#
WF_POLLING = 0
WF_KQUEUE = 1
WF_INOTIFYX = 2
wf_pynotifyx_available = False
try:
#import pynotifyx.__main__ as pynotifyx
import pynotifyx
if callable(pynotifyx.init):
wf_pynotifyx_available = True
except:
pass
class watch(object):
"""
Sets up an instance that can be included in a select/poll set. The
descriptor will become readable whenever registered files change.
Because the class has a fileno() method, the class instance can generally
be used directly with select/poll.
The intent of this interface is to insulate the caller from the
system-dependent implementations of Unix file system event notification
(kevent on *BSD/MacOS, inotify on Linux). The interface also supports
a polling mode which is much less efficient, but probably better than
nothing.
    On Linux, inotify is used when the pynotifyx module is available;
    otherwise the instance falls back to polling mode.
    Apart from simplifying the use of select.kqueue calls, the intent is to
    mask the differences between the kqueue and inotify implementations.
    Because of this, the interface avoids providing features that would be
    hard to implement in one or the other interface.
All open file descriptors are automatically closed when the instance
is removed.
The following params can be set when the class is initialized and
overridden in methods where appropriate.
timeout - Aggregation timeout. Because file change events
tend to arrive in bursts, setting an aggregation
timeout limits the number of calls and cuts
duplication of changes on a single file. The
effect is that the method will keep retrieving
events until none arrive within the timeout period.
That means the get() method will block for at least
the timeout period, so the timeout should be small
(perhaps 0.1 seconds). The default is 0 which
means the get() method will return immediately.
Note that even with a zero timeout, get() may still
return multiple events if multiple changes are
pending when it is called.
limit - Limit the number of change events that will
collected due to the aggregation timeout. The
default is None (no limit). The value is ignored
if a timeout is not set. Note that the limit may
be exceeded if the last event read returns more
than one event.
commit - If False, skip rebuilding watch list after each
add() or remove(). The caller should then call
commit() directly to commit changes.
    missing -        If True (the default), a file does not need to
                     pre-exist when add() is called. In addition, a file
                     added with this flag set can disappear and reappear,
                     which will cause an event each time. With the flag
                     False, add() and get() will raise
exceptions if the file is initially missing or is
removed or renamed, and the file will cease being
watched until add() is called again.
polling - Force the interface into polling mode. Only available
when instantiating the class. Polling mode has no
practical advantage over file system events so this
param really exists for testing polling mode.
log - A logging instance.
"""
def __init__(self, polling=False, **params):
self._params = params
self._mode_map = dict((val, nam) for nam, val in globals().items() if nam.startswith('WF_'))
        # Set up the access mode. If pynotifyx is available, WF_INOTIFYX mode
        # will be used; otherwise WF_KQUEUE is used when select.kqueue() is
        # callable, and polling is the last resort. The get_mode() method
        # supplies read-only access to the attribute. The value is not
        # settable after the class is instantiated.
#
if polling:
self._mode = WF_POLLING
elif wf_pynotifyx_available:
self._mode = WF_INOTIFYX
elif 'kqueue' in dir(select) and callable(select.kqueue):
self._mode = WF_KQUEUE
else:
self._mode = WF_POLLING
# Holds all paths that have been added, whether actually being watched or not.
self.paths = {}
# Holds paths that have been opened and are being watched
#
self.paths_open = {}
# Holds paths where "missing" was True and the path could not be opened.
#
self.paths_pending = {}
# Associates all open file descriptors and the opened path
#
self.fds_open = {}
# Provided to caller to observe the last set of changes. The
# value of the dict is the time the change was noted.
#
self.last_changes = {}
self._discard = logging.getLogger(__name__)
self._discard.addHandler(logging.NullHandler())
self.unprocessed_event = None
if self._mode == WF_KQUEUE:
# Immediately create a kernel event queue so that an immediate
# call to fileno() will return the correct controlling fd.
#
self._kq = select.kqueue()
elif self._mode == WF_INOTIFYX:
            # Immediately create a pynotifyx channel identified by a
            # file descriptor.
#
self._inx_fd = pynotifyx.init()
            # This is the standard mask used for watches. It is set up
            # to trigger events only when something changes.
#
self._inx_mask = pynotifyx.IN_ALL_EVENTS & ~(pynotifyx.IN_ACCESS | pynotifyx.IN_CLOSE | pynotifyx.IN_OPEN)
# Record inode of watched paths to work around simfs bug
#
self._inx_inode = {}
elif self._mode == WF_POLLING:
self._self_pipe()
self._poll_stat = {}
# Holds paths that were removed or renamed until get() is
# called.
#
self._poll_pending = {}
def __del__(self):
self.close()
def close(self):
close_fds = True
if self._mode == WF_KQUEUE and self._kq:
            # This is actually auto-closed, so this bit is not
            # strictly needed.
#
try: self._kq.close()
except: pass
self._kq = None
elif self._mode == WF_INOTIFYX:
# As we are storing inotify watch-descriptors rather
# than file descriptors in fds_open, skip closing them.
# They are automatically cleared when _inx_fd is closed
#
close_fds = False
try: os.close(self._inx_fd)
except: pass
self._inx_fd = None
elif self._mode == WF_POLLING:
for fd in [self._poll_fd, self._poll_send]:
try: os.close(fd)
except: pass
# However, these are not automatically closed, so we
# definitely need the destructor here.
#
if close_fds:
for fd in list(self.fds_open):
try: os.close(fd)
except: pass
del self.fds_open[fd]
def fileno(self):
if self._mode == WF_KQUEUE:
return self._kq.fileno()
elif self._mode == WF_INOTIFYX:
return self._inx_fd
else:
return self._poll_fd
def get_mode(self):
return self._mode
def get_mode_name(self, mode=None):
if mode is None:
mode = self._mode
if mode in self._mode_map:
return self._mode_map[mode]
else:
return "Mode" + str(mode)
def _getparam(self, tag, default = None, **params):
val = params.get(tag)
if val is None:
val = self._params.get(tag)
if val is None:
val = default
return val
def _close(self, fd):
"""
Close the descriptor used for a path regardless
of mode.
"""
if self._mode == WF_INOTIFYX:
try: pynotifyx.rm_watch(self._inx_fd, fd)
except: pass
else:
try: os.close(fd)
except: pass
def _self_pipe(self):
"""
This sets up a self-pipe so we can hand back an fd to the caller
allowing the object to manage event triggers. The ends of the pipe are
set non-blocking so it doesn't really matter if a bunch of events fill
the pipe buffer.
"""
import fcntl
self._poll_fd, self._poll_send = os.pipe()
for fd in [self._poll_fd, self._poll_send]:
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
def _disappeared(self, fd, path, **params):
"""
        Called when an open path is no longer accessible. This will either
move the path to pending (if the 'missing' param is set for the
file), or fire an exception.
"""
log = self._getparam('log', self._discard, **params)
log.debug("Path %r removed or renamed, handling removal", path)
self._close(fd)
if self._mode == WF_POLLING and fd in self._poll_stat:
del self._poll_stat[fd]
if self._mode == WF_INOTIFYX and path in self._inx_inode:
del self._inx_inode[path]
del self.fds_open[fd]
del self.paths_open[path]
if self.paths[path]:
try:
if self._add_file(path, **params):
log.debug("Path %r immediately reappeared, pending transition skipped", path)
return
except Exception as e:
log.debug("Path %r reappearance check failed -- %s", path, e)
log.debug("Path %r marked as pending", path)
self.paths_pending[path] = True
else:
del self.paths[path]
raise Exception("Path %r has been removed or renamed" % path)
def _poll_get_stat(self, fd, path):
"""
Check the status of an open path. Note that we have to use stat() rather
than fstat() because we want to detect file removes and renames.
"""
try:
st = os.stat(path)
fstate = (st.st_mode, st.st_nlink, st.st_uid, st.st_gid, st.st_size, st.st_mtime)
except Exception as e:
log = self._getparam('log', self._discard)
log.debug("stat failed on %s -- %s", path, e)
self._poll_pending[path] = time.time()
self._disappeared(fd, path)
fstate = None
return fstate
def _poll_trigger(self):
"""
        Trigger activity for the caller by writing a NUL to the self-pipe.
"""
try:
os.write(self._poll_send, '\0'.encode('utf-8'))
except Exception as e:
log = self._getparam('log', self._discard)
log.debug("Ignoring self-pipe write error -- %s", e)
def _clean_failed_fds(self, fdlist):
for fd in fdlist:
if fd in self.fds_open:
path = self.fds_open[fd]
del self.fds_open[fd]
if self._mode == WF_INOTIFYX and path in self._inx_inode:
del self._inx_inode[path]
if path in self.paths_open:
del self.paths_open[path]
self._close(fd)
def _trigger(self, fd, **params):
"""
We need events to fire on appearance because the code
doesn't see the file until after it has been created.
In WF_KQUEUE mode, this simulates triggering an event by firing
a oneshot timer event to fire immediately (0 msecs). Because
this uses the file descriptor as the timer identity and get() doesn't
care what filter actually fired the event, the outside world sees
this as a file change.
In WF_INOTIFYX mode, this triggers an event by setting IN_OPEN on
the inotify watch, opening the file in read-only mode, closing it,
and removing the IN_OPEN setting. The file is not discovered unless
it can be opened so this is reliable.
In WF_POLLING mode, this resets our knowledge of the stat
info, and then triggers file activity to wake up the caller.
"""
log = self._getparam('log', self._discard, **params)
if self._mode == WF_KQUEUE:
try:
ev = select.kevent(fd, filter=select.KQ_FILTER_TIMER,
flags=select.KQ_EV_ADD | select.KQ_EV_CLEAR | select.KQ_EV_ONESHOT, data=0)
self._kq.control([ev], 0, 0)
log.debug("Added timer event following pending file promotion")
except Exception as e:
log.error("Failed to add timer event following pending file promotion -- %s", e)
elif self._mode == WF_INOTIFYX:
if fd in self.fds_open:
try:
path = self.fds_open[fd]
nfd = pynotifyx.add_watch(self._inx_fd, path, self._inx_mask|pynotifyx.IN_OPEN)
if nfd != fd:
raise Exception("Assertion failed: IN_OPEN add_watch() set gave new wd")
tfd = os.open(path, os.O_RDONLY)
try: os.close(tfd)
except: pass
nfd = pynotifyx.add_watch(self._inx_fd, path, self._inx_mask)
if nfd != fd:
raise Exception("Assertion failed: IN_OPEN add_watch() clear gave new wd")
except Exception as e:
log.error("Failed to trigger event via os.open() following pending file promotion -- %s", e)
else:
log.error("Pending file promotion of unknown wd %d failed", fd)
elif self._mode == WF_POLLING:
self._poll_stat[fd] = ()
self._poll_trigger()
def _add_file(self, path, **params):
"""
Attempt to add a file to the system monitoring mechanism.
"""
log = self._getparam('log', self._discard, **params)
fd = None
try:
fd = os.open(path, os.O_RDONLY)
except Exception as e:
if not self.paths[path]:
log.error("Open failed on watched path %r -- %s", path, e, exc_info=log.isEnabledFor(logging.DEBUG))
raise e
elif path in self.paths_pending:
log.debug("path %r is still pending -- %s", path, e)
else:
self.paths_pending[path] = True
log.debug("Added %r to pending list after open failure -- %s", path, e)
return False
if self._mode == WF_KQUEUE:
log.debug("path %s opened as fd %d", path, fd)
try:
ev = select.kevent(fd,
filter=select.KQ_FILTER_VNODE,
flags=select.KQ_EV_ADD | select.KQ_EV_CLEAR,
fflags=select.KQ_NOTE_WRITE | select.KQ_NOTE_ATTRIB | select.KQ_NOTE_LINK |
select.KQ_NOTE_DELETE | select.KQ_NOTE_RENAME)
self._kq.control([ev], 0, 0)
except Exception as e:
log.error("kevent failed on watched path %r -- %s", path, e)
try: os.close(fd)
except: pass
raise e
elif self._mode == WF_INOTIFYX:
            # inotify doesn't need the target paths open, so now that the path
            # is known to be accessible, close the actual fd and use the
            # watch-descriptor as the fd.
#
# However, due to an apparent simfs bug where inotify does not fire either
# IN_DELETE_SELF or IN_MOVE_SELF, we need to record the inode so that we
# can detect deletes and renames internally. simfs is used in containers.
#
try:
s = os.fstat(fd)
self._inx_inode[path] = s.st_ino
except Exception as e:
log.error("fstat(%d) failed on open path %r -- %s", fd, path, e)
try: os.close(fd)
except: pass
raise e
try: os.close(fd)
except: pass
try:
fd = pynotifyx.add_watch(self._inx_fd, path, self._inx_mask)
log.debug("path %s watched with wd %d", path, fd)
except Exception as e:
log.error("inotify failed on watched path %r -- %s", path, e)
raise e
elif self._mode == WF_POLLING:
log.debug("path %s opened as fd %d", path, fd)
fstate = self._poll_get_stat(fd, path)
if fstate:
self._poll_stat[fd] = fstate
self.paths_open[path] = fd
self.fds_open[fd] = path
return True
def commit(self, **params):
"""
Rebuild kevent operations by removing open files that no longer need to
be watched, and adding new files if they are not currently being watched.
This is done by comparing self.paths to self.paths_open.
"""
log = self._getparam('log', self._discard, **params)
        # Find all the paths that no longer need watching
#
removed = 0
added = 0
for path in list(self.paths_open):
if path not in self.paths:
fd = self.paths_open[path]
if self._mode == WF_KQUEUE:
# kevent automatically deletes the event when the fd is closed
try:
os.close(fd)
except Exception as e:
log.warning("close failed on watched file %r -- %s", path, e)
elif self._mode == WF_INOTIFYX:
try:
pynotifyx.rm_watch(self._inx_fd, fd)
except Exception as e:
log.warning("remove failed on watched file %r -- %s", path, e)
if path in self._inx_inode:
del self._inx_inode[path]
elif self._mode == WF_POLLING:
if fd in self._poll_stat:
del self._poll_stat[fd]
else:
log.warning("fd watched path %r missing from _poll_stat map", path)
try:
os.close(fd)
except Exception as e:
log.warning("close failed on watched file %r -- %s", path, e)
if fd in self.fds_open:
del self.fds_open[fd]
else:
log.warning("fd watched path %r missing from fd map", path)
del self.paths_open[path]
log.debug("Removed watch for path %r", path)
removed += 1
# Find all the paths that are new and should be watched
#
fdlist = []
failed = []
last_exc = None
log.debug("%d watched path%s", len(self.paths), ses(len(self.paths)))
for path in list(self.paths):
if path not in self.paths_open:
try:
if not self._add_file(path, **params):
continue
except Exception as e:
last_exc = e
failed.append(path)
continue
fdlist.append(self.paths_open[path])
if path in self.paths_pending:
log.debug("pending path %r has now appeared", path)
del self.paths_pending[path]
self._trigger(self.paths_open[path], **params)
added += 1
log.debug("Added watch for path %r with ident %d", path, self.paths_open[path])
if failed:
self._clean_failed_fds(fdlist)
raise Exception("Failed to set watch on %s -- %s" % (str(failed), str(last_exc)))
log.debug("%d added, %d removed", added, removed)
def get(self, **params):
"""
        Return a list of watched paths that were affected by recent
changes, following a successful poll() return for the controlling
file descriptor.
If param "timeout" is greater than 0, the event queue will be read
multiple times and reads continue until a timeout occurs.
With a timeout active, if param "limit" is greater than 0,
event reads will stop when the number of changes exceeds the
limit. This guarantees that the time the method will block
will never be greater than timeout*limit seconds.
Note that with a timeout active, multiple changes to a
single path will only be reported once.
"""
log = self._getparam('log', self._discard, **params)
self.last_changes = {}
timeout = self._getparam('timeout', 0, **params)
if not timeout or timeout < 0:
timeout = 0
limit = self._getparam('limit', None, **params)
if not limit or limit < 0:
limit = None
max_events = limit if limit else 10000
if self.unprocessed_event:
log.debug("Will handle unprocessed event")
if self._mode == WF_KQUEUE:
evagg = {}
while True:
try:
evlist = self._kq.control(None, max_events, timeout)
except OSError as e:
if e.errno == errno.EINTR:
break
raise e
if not evlist:
break
log.debug("kq.control() returned %d event%s", len(evlist), ses(len(evlist)))
for ev in evlist:
if ev.ident in self.fds_open:
path = self.fds_open[ev.ident]
if path in evagg:
evagg[path].fflags |= ev.fflags
else:
evagg[path] = ev
if limit and len(evagg) >= limit:
break
for path, ev in evagg.items():
if ev.fflags & (select.KQ_NOTE_DELETE | select.KQ_NOTE_RENAME):
self._disappeared(ev.ident, path, **params)
self.last_changes[path] = time.time()
log.debug("Change on %r", path)
elif self._mode == WF_INOTIFYX:
evagg = {}
while True:
try:
evlist = pynotifyx.get_events(self._inx_fd, timeout)
except IOError as e:
if e.errno == errno.EINTR:
break
raise e
if not evlist:
break
log.debug("pynotifyx.get_events() returned %d event%s", len(evlist), ses(len(evlist)))
for ev in evlist:
if ev.wd in self.fds_open:
path = self.fds_open[ev.wd]
if path in evagg:
evagg[path].mask |= ev.mask
else:
evagg[path] = ev
elif ev.mask & pynotifyx.IN_IGNORED:
log.debug("skipping IN_IGNORED event on unknown wd %d", ev.wd)
else:
log.warning("attempt to handle unknown inotify event wd %d", ev.wd)
if limit and len(evagg) >= limit:
break
for path, ev in evagg.items():
log.debug("Change on %r -- %s", path, ev.get_mask_description())
if ev.mask & (pynotifyx.IN_DELETE_SELF | pynotifyx.IN_MOVE_SELF):
self._disappeared(ev.wd, path, **params)
elif ev.mask & pynotifyx.IN_ATTRIB:
file_move_del = False
try:
s = os.stat(path)
if s.st_ino != self._inx_inode[path]:
file_move_del = True
log.info("'simfs' (used with containers) bug detected -- %r moved", path)
except Exception as e:
file_move_del = True
log.info("'simfs' (used with containers) bug detected -- %r removed", path)
if file_move_del:
self._disappeared(ev.wd, path, **params)
self.last_changes[path] = time.time()
elif self._mode == WF_POLLING:
# Consume any pending data from the self-pipe. Read
# until EOF. The fd is already non-blocking so this
# terminates on zero read or any error.
#
cnt = 0
while True:
try:
data = os.read(self._poll_fd, 1024)
                    if not data:
break
cnt += len(data)
except OSError as e:
if e.errno != errno.EAGAIN:
log.warning("Ignoring self-pipe read failure -- %s", e)
break
except Exception as e:
log.warning("Ignoring self-pipe read failure -- %s", e)
break
log.debug("Self-pipe read consumed %d byte%s", cnt, ses(cnt))
now = time.time()
for path in self._poll_pending:
self.last_changes[path] = self._poll_pending[path]
self._poll_pending = {}
for fd in list(self._poll_stat):
path = self.fds_open[fd]
fstate = self._poll_get_stat(fd, path)
if fstate is None:
self.last_changes[path] = now
elif self._poll_stat[fd] != fstate:
self._poll_stat[fd] = fstate
self.last_changes[path] = now
log.debug("Change on %r", path)
else:
raise Exception("Unsupported polling mode " + self.get_mode_name())
paths = list(self.last_changes)
paths.sort()
log.debug("Change was to %d path%s", len(paths), ses(len(paths)))
return paths
def add(self, paths, **params):
"""
Add a path (or list of paths) to the list of paths being
watched. The 'missing' setting for a file can also be
changed by re-adding the file.
"""
log = self._getparam('log', self._discard, **params)
missing = self._getparam('missing', True, **params)
commit = self._getparam('commit', True, **params)
if type(paths) is not list:
paths = [paths]
rebuild = False
for path in paths:
if path in self.paths:
if self.paths[path] == missing:
log.info("Ignoring attempt to add existing path %r", path)
else:
log.debug("Changing missing state from %s to %s on existing path %r",
str(self.paths[path]), str(missing), path)
self.paths[path] = missing
else:
log.debug("Adding path %r", path)
self.paths[path] = missing
rebuild = True
if commit and rebuild:
self.commit(**params)
def remove(self, paths, **params):
"""
Delete paths from the watched list.
"""
log = self._getparam('log', self._discard, **params)
commit = self._getparam('commit', True, **params)
if type(paths) is not list:
paths = [paths]
rebuild = False
for path in paths:
if path in self.paths_pending:
del self.paths_pending[path]
if path in self.paths:
del self.paths[path]
rebuild = True
else:
log.error("Attempt to remove %r which was never added", path)
raise Exception("Path %r has never been added" % path)
if commit and rebuild:
self.commit(**params)
def scan(self, **params):
"""
This method should be called periodically if files were added
with "missing=False". It will check for the appearance of missing
files and ensure an event will be triggered for any that appear.
It also needs to be called if the instance could be in WF_POLLING
mode as file system changes will only be detected in WF_POLLING
mode when scan() is called.
The method should be called frequently (perhaps every 1-5 seconds)
as part of idle processing in a select/poll loop.
The approach is intended to support file appearance and disappearance
using kqueue/kevent on BSD while retaining the ability in the
future to transparently support inotify on Linux without any code
or efficiency impact on callers.
For WF_KQUEUE and WF_INOTIFYX mode, processing consists of determining
that a pending path is now accessible, and then calling commit() to
make the necessary adjustments. This will be efficient as long as the
list of pending paths is small.
At some point this should be optimized by watching the directory of
a pending target (assuming it exists). If the directory changes to
indicate the pending path might have appeared, then the next get()
call should perform a scan().
For WF_POLLING mode, the entire list of open files is also scanned
looking for significant differences in the os.fstat info.
If/when inotify is supported here, it is expected that the scan()
method would be a no-op.
"""
log = self._getparam('log', self._discard, **params)
pending = len(self.paths_pending)
log.debug("Checking %d pending path%s", pending, ses(pending))
for path in self.paths_pending:
if os.path.exists(path):
log.debug("pending path %s now accessible, triggering commit()", path)
self.commit(**params)
return
if self._mode == WF_POLLING:
log.debug("Checking %d open path%s", len(self._poll_stat), ses(len(self._poll_stat)))
for fd in list(self._poll_stat):
fstate = self._poll_get_stat(fd, self.fds_open[fd])
if fstate is None or self._poll_stat[fd] != fstate:
self._poll_trigger()
break
|
|
# -*- coding: utf-8 -*-
"""
Tests date parsing functionality for all of the
parsers defined in parsers.py
"""
from distutils.version import LooseVersion
from datetime import datetime, date
import pytest
import numpy as np
import pandas._libs.lib as lib
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.io.parsers as parsers
import pandas.core.tools.datetimes as tools
import pandas.util.testing as tm
import pandas.io.date_converters as conv
from pandas import DataFrame, Series, Index, DatetimeIndex, MultiIndex
from pandas import compat
from pandas.compat import parse_date, StringIO, lrange
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.indexes.datetimes import date_range
class ParseDatesTests(object):
def test_separator_date_conflict(self):
# Regression test for gh-4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
assert 'nominal' in df
assert 'actual' in df
assert 'X1' not in df
assert 'X2' not in df
assert 'X3' not in df
d = datetime(1999, 1, 27, 19, 0)
assert df.loc[0, 'nominal'] == d
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
assert 'nominal' in df
assert 'actual' in df
assert 1 in df
assert 2 in df
assert 3 in df
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = self.read_csv(StringIO(data), header=None,
prefix='X', parse_dates=[[1, 2], [1, 3]])
assert 'X1_X2' in df
assert 'X1_X3' in df
assert 'X1' not in df
assert 'X2' not in df
assert 'X3' not in df
d = datetime(1999, 1, 27, 19, 0)
assert df.loc[0, 'X1_X2'] == d
df = self.read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
assert '1_2' in df
assert '1_3' in df
assert 1 in df
assert 2 in df
assert 3 in df
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
assert df.index[0] == d
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
assert 'nominal' in df
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
assert result['0_1'][0] == ex_val
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
assert not isinstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
with pytest.raises(ValueError):
self.read_csv(StringIO(self.ts_data), parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
with pytest.raises(ValueError):
self.read_csv(StringIO(data), parse_dates=[[1, 2]])
def test_date_parser_int_bug(self):
# See gh-3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
self.read_csv(log_file, index_col=0, parse_dates=[0], date_parser=f)
def test_nat_parse(self):
# See gh-3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = self.read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
            # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = self.read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates=['date'])
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format_with_yearfirst(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
# See gh-217
import dateutil
if dateutil.__version__ >= LooseVersion('2.5.0'):
pytest.skip("testing yearfirst=True not-support"
"on datetutil < 2.5.0 this works but"
"is wrong")
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
data = 'a,b,c\n01/01/2010,1,15/02/2010'
expected = DataFrame({'a': [datetime(2010, 1, 1)], 'b': [1],
'c': [datetime(2010, 2, 15)]})
expected = expected.set_index(['a', 'b'])
df = self.read_csv(StringIO(data), index_col=[0, 1],
parse_dates=[0, 2], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=[0, 1],
parse_dates=['a', 'c'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
assert isinstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
assert isinstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
pytest.raises(TypeError, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_parse_tz_aware(self):
# See gh-1693
import pytz
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = self.read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
assert stamp.minute == 39
try:
assert result.index.tz is pytz.utc
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
assert stamp.minute == result.minute
assert stamp.hour == result.hour
assert stamp.day == result.day
def test_multiple_date_cols_index(self):
data = """
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal', chunksize=2)
chunks = list(reader)
assert 'nominalTime' not in df
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_read_with_parse_dates_scalar_non_bool(self):
# See gh-5636
errmsg = ("Only booleans, lists, and "
"dictionaries are accepted "
"for the 'parse_dates' parameter")
data = """A,B,C
1,2,2003-11-1"""
tm.assert_raises_regex(TypeError, errmsg, self.read_csv,
StringIO(data), parse_dates="C")
tm.assert_raises_regex(TypeError, errmsg, self.read_csv,
StringIO(data), parse_dates="C",
index_col="C")
def test_read_with_parse_dates_invalid_type(self):
errmsg = ("Only booleans, lists, and "
"dictionaries are accepted "
"for the 'parse_dates' parameter")
data = """A,B,C
1,2,2003-11-1"""
tm.assert_raises_regex(TypeError, errmsg, self.read_csv,
StringIO(data), parse_dates=(1,))
tm.assert_raises_regex(TypeError, errmsg,
self.read_csv, StringIO(data),
parse_dates=np.array([4, 5]))
tm.assert_raises_regex(TypeError, errmsg, self.read_csv,
StringIO(data), parse_dates=set([1, 3, 3]))
def test_parse_dates_empty_string(self):
# see gh-2263
data = "Date, test\n2012-01-01, 1\n,2"
result = self.read_csv(StringIO(data), parse_dates=["Date"],
na_filter=False)
assert result['Date'].isna()[1]
def test_parse_dates_noconvert_thousands(self):
# see gh-14066
data = 'a\n04.15.2016'
expected = DataFrame([datetime(2016, 4, 15)], columns=['a'])
result = self.read_csv(StringIO(data), parse_dates=['a'],
thousands='.')
tm.assert_frame_equal(result, expected)
exp_index = DatetimeIndex(['2016-04-15'], name='a')
expected = DataFrame(index=exp_index)
result = self.read_csv(StringIO(data), index_col=0,
parse_dates=True, thousands='.')
tm.assert_frame_equal(result, expected)
data = 'a,b\n04.15.2016,09.16.2013'
expected = DataFrame([[datetime(2016, 4, 15),
datetime(2013, 9, 16)]],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), parse_dates=['a', 'b'],
thousands='.')
tm.assert_frame_equal(result, expected)
expected = DataFrame([[datetime(2016, 4, 15),
datetime(2013, 9, 16)]],
columns=['a', 'b'])
expected = expected.set_index(['a', 'b'])
result = self.read_csv(StringIO(data), index_col=[0, 1],
parse_dates=True, thousands='.')
tm.assert_frame_equal(result, expected)
def test_parse_date_time_multi_level_column_name(self):
data = """\
D,T,A,B
date, time,a,b
2001-01-05, 09:00:00, 0.0, 10.
2001-01-06, 00:00:00, 1.0, 11.
"""
datecols = {'date_time': [0, 1]}
result = self.read_csv(StringIO(data), sep=',', header=[0, 1],
parse_dates=datecols,
date_parser=conv.parse_date_time)
expected_data = [[datetime(2001, 1, 5, 9, 0, 0), 0., 10.],
[datetime(2001, 1, 6, 0, 0, 0), 1., 11.]]
expected = DataFrame(expected_data,
columns=['date_time', ('A', 'a'), ('B', 'b')])
tm.assert_frame_equal(result, expected)
def test_parse_date_time(self):
dates = np.array(['2007/1/3', '2008/2/4'], dtype=object)
times = np.array(['05:07:09', '06:08:00'], dtype=object)
expected = np.array([datetime(2007, 1, 3, 5, 7, 9),
datetime(2008, 2, 4, 6, 8, 0)])
result = conv.parse_date_time(dates, times)
assert (result == expected).all()
data = """\
date, time, a, b
2001-01-05, 10:00:00, 0.0, 10.
2001-01-05, 00:00:00, 1., 11.
"""
datecols = {'date_time': [0, 1]}
df = self.read_csv(StringIO(data), sep=',', header=0,
parse_dates=datecols,
date_parser=conv.parse_date_time)
assert 'date_time' in df
assert df.date_time.loc[0] == datetime(2001, 1, 5, 10, 0, 0)
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
def test_parse_date_fields(self):
years = np.array([2007, 2008])
months = np.array([1, 2])
days = np.array([3, 4])
result = conv.parse_date_fields(years, months, days)
expected = np.array([datetime(2007, 1, 3), datetime(2008, 2, 4)])
assert (result == expected).all()
data = ("year, month, day, a\n 2001 , 01 , 10 , 10.\n"
"2001 , 02 , 1 , 11.")
datecols = {'ymd': [0, 1, 2]}
df = self.read_csv(StringIO(data), sep=',', header=0,
parse_dates=datecols,
date_parser=conv.parse_date_fields)
assert 'ymd' in df
assert df.ymd.loc[0] == datetime(2001, 1, 10)
def test_datetime_six_col(self):
years = np.array([2007, 2008])
months = np.array([1, 2])
days = np.array([3, 4])
hours = np.array([5, 6])
minutes = np.array([7, 8])
seconds = np.array([9, 0])
expected = np.array([datetime(2007, 1, 3, 5, 7, 9),
datetime(2008, 2, 4, 6, 8, 0)])
result = conv.parse_all_fields(years, months, days,
hours, minutes, seconds)
assert (result == expected).all()
data = """\
year, month, day, hour, minute, second, a, b
2001, 01, 05, 10, 00, 0, 0.0, 10.
2001, 01, 5, 10, 0, 00, 1., 11.
"""
datecols = {'ymdHMS': [0, 1, 2, 3, 4, 5]}
df = self.read_csv(StringIO(data), sep=',', header=0,
parse_dates=datecols,
date_parser=conv.parse_all_fields)
assert 'ymdHMS' in df
assert df.ymdHMS.loc[0] == datetime(2001, 1, 5, 10, 0, 0)
def test_datetime_fractional_seconds(self):
data = """\
year, month, day, hour, minute, second, a, b
2001, 01, 05, 10, 00, 0.123456, 0.0, 10.
2001, 01, 5, 10, 0, 0.500000, 1., 11.
"""
datecols = {'ymdHMS': [0, 1, 2, 3, 4, 5]}
df = self.read_csv(StringIO(data), sep=',', header=0,
parse_dates=datecols,
date_parser=conv.parse_all_fields)
assert 'ymdHMS' in df
assert df.ymdHMS.loc[0] == datetime(2001, 1, 5, 10, 0, 0,
microsecond=123456)
assert df.ymdHMS.loc[1] == datetime(2001, 1, 5, 10, 0, 0,
microsecond=500000)
def test_generic(self):
data = "year, month, day, a\n 2001, 01, 10, 10.\n 2001, 02, 1, 11."
datecols = {'ym': [0, 1]}
dateconverter = lambda y, m: date(year=int(y), month=int(m), day=1)
df = self.read_csv(StringIO(data), sep=',', header=0,
parse_dates=datecols,
date_parser=dateconverter)
assert 'ym' in df
assert df.ym.loc[0] == date(2001, 1, 1)
def test_dateparser_resolution_if_not_ns(self):
# GH 10245
data = """\
date,time,prn,rxstatus
2013-11-03,19:00:00,126,00E80000
2013-11-03,19:00:00,23,00E80000
2013-11-03,19:00:00,13,00E80000
"""
def date_parser(date, time):
datetime = np_array_datetime64_compat(
date + 'T' + time + 'Z', dtype='datetime64[s]')
return datetime
df = self.read_csv(StringIO(data), date_parser=date_parser,
parse_dates={'datetime': ['date', 'time']},
index_col=['datetime', 'prn'])
datetimes = np_array_datetime64_compat(['2013-11-03T19:00:00Z'] * 3,
dtype='datetime64[s]')
df_correct = DataFrame(data={'rxstatus': ['00E80000'] * 3},
index=MultiIndex.from_tuples(
[(datetimes[0], 126),
(datetimes[1], 23),
(datetimes[2], 13)],
names=['datetime', 'prn']))
tm.assert_frame_equal(df, df_correct)
def test_parse_date_column_with_empty_string(self):
# GH 6428
data = """case,opdate
7,10/18/2006
7,10/18/2008
621, """
result = self.read_csv(StringIO(data), parse_dates=['opdate'])
expected_data = [[7, '10/18/2006'],
[7, '10/18/2008'],
[621, ' ']]
expected = DataFrame(expected_data, columns=['case', 'opdate'])
tm.assert_frame_equal(result, expected)
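# A minimal sketch (not part of the original test suite) of the same pattern with a
# plain read_csv call: combine two raw columns into one parsed datetime column via
# parse_dates plus a custom date_parser. 'conv' is the date-converters module used
# by the tests above; the data string here is hypothetical.
#
#   df = read_csv(StringIO("KORD,19990127, 19:00:00\n"),
#                 header=None,
#                 parse_dates={'nominal': [1, 2]},
#                 date_parser=conv.parse_date_time)
#   # df['nominal'][0] == Timestamp('1999-01-27 19:00:00')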
|
|
#!/bin/python3
import relaiscommands as rc
import reliablechoice as tplink
import serial
import time
import socketserver
import sys
DEVICE='/dev/ttyS0'
EXPECTED_CARDS=3
EXPECTED_FIRMWARES = [11]
class RelaisRequestHandler(socketserver.StreamRequestHandler):
def __init__(self, request, client_address, server):
print('New Request Handler')
socketserver.BaseRequestHandler.__init__(self, request, client_address, server)
return
def handle(self):
global cards, ser
print('Waiting for data...')
while True:
data=self.rfile.readline().strip()
if len(data) == 0:
break
if data == bytes("quit","utf-8"):
self.request.send(bytes('200 Bye\n',"utf-8"))
break
cmd=data.decode('utf-8').split()
if len(cmd) == 0:
self.request.send(bytes('500 No command\n',"utf-8"))
continue
########### QUERY STATE
if cmd[0] == 'state':
reply="200 "
(result,msg,state)=rc.getPortState(cards,ser)
if not result:
self.request.send(bytes('400 Error getting port state. '+str(msg)+'\n',"utf-8"))
continue
for port in state:
reply+=(str(port)+", ")
reply=reply[:-2]+'\n'
self.request.send(bytes(reply,'utf-8'))
########### SWITCH ON
elif cmd[0] == 'on':
if len(cmd) != 2:
self.request.send(bytes('500 Wrong number of arguments. 1 needed\n',"utf-8"))
continue
try:
nr=int(cmd[1])
except ValueError:
self.request.send(bytes('500 Argument must be an int\n',"utf-8"))
continue
(res,msg)=rc.relaisOn(nr,cards, ser)
if not res:
self.request.send(bytes('400 Error switching port on. '+str(msg)+'\n',"utf-8"))
continue
self.request.send(bytes('200 OK\n',"utf-8"))
########### SWITCH OFF
elif cmd[0] == 'off':
if len(cmd) != 2:
self.request.send(bytes('500 Wrong number of arguments. 1 needed\n',"utf-8"))
continue
try:
nr=int(cmd[1])
except ValueError:
self.request.send(bytes('500 Argument must be an int\n',"utf-8"))
continue
(res,msg)=rc.relaisOff(nr,cards, ser)
if not res:
self.request.send(bytes('400 Error switching port off. '+str(msg)+'\n',"utf-8"))
continue
self.request.send(bytes('200 OK\n',"utf-8"))
########### TEST
elif cmd[0] == 'test':
for i in range(0,len(cards)*8):
(res,msg)=rc.relaisOn(i,cards,ser)
if not res:
self.request.send(bytes('400 Test failed at '+str(i)+'. '+str(msg)+'\n',"utf-8"))
continue
time.sleep(0.1)
for i in range(0,len(cards)*8):
(res,msg)=rc.relaisOff(i,cards,ser)
if not res:
self.request.send(bytes('400 Test failed at '+str(i)+'. '+str(msg)+'\n',"utf-8"))
continue
time.sleep(0.1)
self.request.send(bytes('200 OK\n',"utf-8"))
########### TPLINK status
elif cmd[0] == 'tpstate':
(enabled,link)=tplink.getState()
#unify list: first bit=enabled, second bit=link
if len(enabled) == 0 or len(link)==0:
self.request.send(bytes('400 Invalid data received\n','utf-8'))
continue
print("Enabled: "+str(enabled))
print("Link: "+str(link))
for port in range(0,len(enabled)):
if int(link[port]) != 0:
enabled[port]=int(enabled[port])+2
reply="200 "
for port in enabled:
reply+=(str(port)+", ")
reply=reply[:-2]+'\n'
self.request.send(bytes(reply,'utf-8'))
########### SWITCH ON
elif cmd[0] == 'tpon':
if len(cmd) != 2:
self.request.send(bytes('500 Wrong number of arguments. 1 needed\n',"utf-8"))
continue
try:
nr=int(cmd[1])
except ValueError:
self.request.send(bytes('500 Argument must be an int\n',"utf-8"))
continue
res=tplink.setPort(nr,True)
if not res:
self.request.send(bytes('400 Error switching port on.\n',"utf-8"))
continue
self.request.send(bytes('200 OK\n',"utf-8"))
########### SWITCH OFF
elif cmd[0] == 'tpoff':
if len(cmd) != 2:
self.request.send(bytes('500 Wrong number of arguments. 1 needed\n',"utf-8"))
continue
try:
nr=int(cmd[1])
except ValueError:
self.request.send(bytes('500 Argument must be an int\n',"utf-8"))
continue
res=tplink.setPort(nr,False)
if not res:
self.request.send(bytes('400 Error switching port off.\n',"utf-8"))
continue
self.request.send(bytes('200 OK\n',"utf-8"))
########### UNKNOWN CMD
else:
self.request.send(bytes('500 Unknown command: '+str(cmd[0])+'\n',"utf-8"))
return
def finish(self):
print('Request Handler finish')
return socketserver.BaseRequestHandler.finish(self)
class RelaisServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
allow_reuse_address = True
def __init__(self, server_address, handler_class=RelaisRequestHandler):
print('Init server')
socketserver.TCPServer.__init__(self, server_address, handler_class)
return
def serve_forever(self):
print('Handling requests, press <Ctrl-C> to quit')
while True:
self.handle_request()
return
def server_close(self):
print('server_close')
return socketserver.TCPServer.server_close(self)
def finish_request(self, request, client_address):
print('finish_request(%s, %s)', request, client_address)
return socketserver.TCPServer.finish_request(self, request, client_address)
def close_request(self, request_address):
print('close_request(%s)', request_address)
return socketserver.TCPServer.close_request(self, request_address)
global ser
ser = serial.Serial(DEVICE, 19200, timeout=2)
global cards
cards=rc.setup(ser)
print("Detected boards:")
for card in cards:
print("Found card "+str(card['address'])+" with firmware version "+str(card['firmware'])+". Checksum correct?: "+str(card['xorok']))
# Sanity checks, and exit if something is wrong
if len(cards) != EXPECTED_CARDS:
print("Expected "+str(EXPECTED_CARDS)+" but found "+str(len(cards))+". Exiting.")
sys.exit(-1)
for card in cards:
if card['firmware'] not in EXPECTED_FIRMWARES:
print("Found unexpected firmware "+str(card['firmware'])+". Exiting.")
sys.exit(-2)
print("Starting server")
address = ('127.0.0.1', 2222) # listen on localhost, fixed port 2222
server = RelaisServer(address, RelaisRequestHandler)
server.serve_forever()
(res,msg,state)=rc.getPortState(cards,ser)
if not res:
print("Error getting state"+str(msg))
else:
print("Ports "+str(state))
for i in range(0,16):
(res,msg)=rc.relaisOn(i,cards,ser)
#print("Result "+str(res)+": "+msg)
print("State "+str(rc.getPortState(cards,ser)[2]))
time.sleep(0.5)
(res,msg,state)=rc.getPortState(cards,ser)
if not res:
print("Error getting state"+str(msg))
else:
print("Ports "+str(state))
for i in range(0,16):
(res,msg)=rc.relaisOff(i,cards,ser)
print("Result "+str(res)+": "+msg)
time.sleep(0.5)
(res,msg,state)=rc.getPortState(cards,ser)
if not res:
print("Error getting state"+str(msg))
else:
print("Ports "+str(state))
#(res,msg)=rc.relaisOff(0,ser)
#print("Result "+str(res)+": "+msg)
#(res,msg)=rc.relaisOn(0,ser)
#print("Result "+str(res)+": "+msg)
|
|
import calendar
import time
from django.conf import settings
from django.conf.urls.defaults import url
from django.core.exceptions import ObjectDoesNotExist
import commonware.log
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from tastypie import fields, http
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.validation import CleanedDataFormValidation
import amo
from amo.helpers import absolutify, urlparams
from amo.urlresolvers import reverse
from amo.utils import send_mail_jinja
from constants.payments import PROVIDER_LOOKUP
from mkt.api.authentication import (OAuthAuthentication,
OptionalOAuthAuthentication,
SharedSecretAuthentication)
from mkt.api.authorization import (AnonymousReadOnlyAuthorization,
Authorization, OwnerAuthorization,
PermissionAuthorization)
from mkt.api.base import (CORSResource, GenericObject, http_error,
MarketplaceModelResource, MarketplaceResource)
from mkt.webpay.forms import FailureForm, PrepareForm, ProductIconForm
from mkt.webpay.models import ProductIcon
from mkt.purchase.webpay import _prepare_pay, sign_webpay_jwt
from mkt.purchase.utils import payments_enabled
from market.models import Price, price_locale
from stats.models import Contribution
from . import tasks
log = commonware.log.getLogger('z.webpay')
class PreparePayResource(CORSResource, MarketplaceResource):
webpayJWT = fields.CharField(attribute='webpayJWT', readonly=True)
contribStatusURL = fields.CharField(attribute='contribStatusURL',
readonly=True)
class Meta(MarketplaceResource.Meta):
always_return_data = True
authentication = (SharedSecretAuthentication(), OAuthAuthentication())
authorization = Authorization()
detail_allowed_methods = []
list_allowed_methods = ['post']
object_class = GenericObject
resource_name = 'prepare'
validation = CleanedDataFormValidation(form_class=PrepareForm)
def obj_create(self, bundle, request, **kwargs):
region = getattr(request, 'REGION', None)
app = bundle.data['app']
if region and region.id not in app.get_price_region_ids():
log.info('Region {0} is not in {1}'
.format(region.id, app.get_price_region_ids()))
if payments_enabled(request):
log.info('Flag not active')
raise http_error(http.HttpForbidden,
'Payments are limited and flag not enabled')
bundle.obj = GenericObject(_prepare_pay(request, bundle.data['app']))
return bundle
class StatusPayResource(CORSResource, MarketplaceModelResource):
class Meta(MarketplaceModelResource.Meta):
always_return_data = True
authentication = (SharedSecretAuthentication(), OAuthAuthentication())
authorization = OwnerAuthorization()
detail_allowed_methods = ['get']
queryset = Contribution.objects.filter(type=amo.CONTRIB_PURCHASE)
resource_name = 'status'
def obj_get(self, request=None, **kw):
try:
obj = super(StatusPayResource, self).obj_get(request=request, **kw)
except ObjectDoesNotExist:
# Anything that's not correct will be raised as a 404 so that it's
# harder to iterate over contribution values.
log.info('Contribution not found')
return None
if not OwnerAuthorization().is_authorized(request, object=obj):
raise http_error(http.HttpForbidden,
'You are not an author of that app.')
if not obj.addon.has_purchased(request.amo_user):
log.info('Not in AddonPurchase table')
return None
return obj
def base_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<uuid>[^/]+)/$" %
self._meta.resource_name,
self.wrap_view('dispatch_detail'),
name='api_dispatch_detail')
]
def full_dehydrate(self, bundle):
bundle.data = {'status': 'complete' if bundle.obj.id else 'incomplete'}
return bundle
class PriceResource(CORSResource, MarketplaceModelResource):
prices = fields.ListField(attribute='prices', readonly=True)
localized = fields.DictField(attribute='suggested', readonly=True,
blank=True, null=True)
pricePoint = fields.CharField(attribute='name', readonly=True)
name = fields.CharField(attribute='tier_name', readonly=True)
class Meta:
detail_allowed_methods = ['get']
filtering = {'pricePoint': 'exact'}
include_resource_uri = False
list_allowed_methods = ['get']
queryset = Price.objects.filter(active=True).order_by('price')
resource_name = 'prices'
def _get_prices(self, bundle):
"""Both localized and prices need access to this. """
provider = bundle.request.GET.get('provider', None)
if provider:
provider = PROVIDER_LOOKUP[provider]
return bundle.obj.prices(provider=provider)
def dehydrate_localized(self, bundle):
region = bundle.request.REGION
for price in self._get_prices(bundle):
if price['region'] == region.id:
result = price.copy()
result.update({
'locale': price_locale(price['price'], price['currency']),
'region': region.name,
})
return result
return {}
def dehydrate_prices(self, bundle):
return self._get_prices(bundle)
class FailureNotificationResource(MarketplaceModelResource):
class Meta:
authentication = OAuthAuthentication()
authorization = PermissionAuthorization('Transaction', 'NotifyFailure')
detail_allowed_methods = ['patch']
queryset = Contribution.objects.filter(uuid__isnull=False)
resource_name = 'failure'
def obj_update(self, bundle, **kw):
form = FailureForm(bundle.data)
if not form.is_valid():
raise self.form_errors(form)
data = {'transaction_id': bundle.obj,
'transaction_url': absolutify(
urlparams(reverse('mkt.developers.transactions'),
transaction_id=bundle.obj.uuid)),
'url': form.cleaned_data['url'],
'retries': form.cleaned_data['attempts']}
owners = bundle.obj.addon.authors.values_list('email', flat=True)
send_mail_jinja('Payment notification failure.',
'webpay/failure.txt',
data, recipient_list=owners)
return bundle
class ProductIconResource(CORSResource, MarketplaceModelResource):
url = fields.CharField(readonly=True)
class Meta(MarketplaceResource.Meta):
authentication = OptionalOAuthAuthentication()
authorization = AnonymousReadOnlyAuthorization(
authorizer=PermissionAuthorization('ProductIcon', 'Create'))
detail_allowed_methods = ['get']
fields = ['ext_url', 'ext_size', 'size']
filtering = {
'ext_url': 'exact',
'ext_size': 'exact',
'size': 'exact',
}
list_allowed_methods = ['get', 'post']
queryset = ProductIcon.objects.filter()
resource_name = 'product/icon'
validation = CleanedDataFormValidation(form_class=ProductIconForm)
def dehydrate_url(self, bundle):
return bundle.obj.url()
def obj_create(self, bundle, request, **kwargs):
log.info('Resizing product icon %s @ %s to %s for webpay'
% (bundle.data['ext_url'], bundle.data['ext_size'],
bundle.data['size']))
tasks.fetch_product_icon.delay(bundle.data['ext_url'],
bundle.data['ext_size'],
bundle.data['size'])
# Tell the client that deferred processing will create an object.
raise ImmediateHttpResponse(response=http.HttpAccepted())
@api_view(['POST'])
@permission_classes((AllowAny,))
def sig_check(request):
"""
Returns a signed JWT to use for signature checking.
This is for Nagios checks to ensure that Marketplace's
signed tokens are valid when processed by Webpay.
"""
issued_at = calendar.timegm(time.gmtime())
req = {
'iss': settings.APP_PURCHASE_KEY,
'typ': settings.SIG_CHECK_TYP,
'aud': settings.APP_PURCHASE_AUD,
'iat': issued_at,
'exp': issued_at + 3600, # expires in 1 hour
'request': {}
}
return Response({'sig_check_jwt': sign_webpay_jwt(req)},
status=201)
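# For reference: a successful POST to this view returns HTTP 201 with a body of the
# form {"sig_check_jwt": "<JWT signed with APP_PURCHASE_KEY>"}, which Webpay (or a
# Nagios check) then verifies. The URL routing for this view is defined elsewhere
# and is not shown here.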
|
|
#!/usr/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test all test systems on different platforms to ensure differences in potential energy and
forces are small among platforms.
DESCRIPTION
COPYRIGHT
@author John D. Chodera <jchodera@gmail.com>
All code in this repository is released under the MIT License.
This program is free software: you can redistribute it and/or modify it under
the terms of the MIT License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the MIT License for more details.
You should have received a copy of the MIT License along with this program.
TODO
"""
#=============================================================================================
# PYTHON 3 COMPATIBILITY CRAP
#=============================================================================================
from __future__ import print_function
#=============================================================================================
# ENABLE LOGGING
#=============================================================================================
import logging
logger = logging.getLogger(__name__)
def config_root_logger(verbose, log_file_path=None, mpicomm=None):
"""Setup the the root logger's configuration.
The log messages are printed in the terminal and saved in the file specified
by log_file_path (if not None) and printed. Note that logging use sys.stdout
to print logging.INFO messages, and stderr for the others. The root logger's
configuration is inherited by the loggers created by logging.getLogger(name).
Different formats are used to display messages on the terminal and on the log
file. For example, in the log file every entry has a timestamp which does not
appear in the terminal. Moreover, the log file always shows the module that
generate the message, while in the terminal this happens only for messages
of level WARNING and higher.
Parameters
----------
verbose : bool
Control the verbosity of the messages printed in the terminal. The logger
displays messages of level logging.INFO and higher when verbose=False.
Otherwise those of level logging.DEBUG and higher are printed.
log_file_path : str, optional, default = None
If not None, this is the path where all the logger's messages of level
logging.DEBUG or higher are saved.
mpicomm : mpi4py.MPI.COMM communicator, optional, default=None
If specified, this communicator will be used to determine node rank.
"""
class TerminalFormatter(logging.Formatter):
"""
Simplified format for INFO and DEBUG level log messages.
This keeps the logging.info() and debug() format separate from the other
levels, where more information may be needed. For example, for warning and
error messages it is convenient to also know the module that generates them.
"""
# This is the cleanest way I found to make the code compatible with both
# Python 2 and Python 3
simple_fmt = logging.Formatter('%(message)s')
default_fmt = logging.Formatter('%(levelname)s - %(name)s - %(message)s')
def format(self, record):
if record.levelno <= logging.INFO:
return self.simple_fmt.format(record)
else:
return self.default_fmt.format(record)
# Check if root logger is already configured
n_handlers = len(logging.root.handlers)
if n_handlers > 0:
root_logger = logging.root
for i in range(n_handlers):
root_logger.removeHandler(root_logger.handlers[0])
# If this is a worker node, don't save any log file
if mpicomm:
rank = mpicomm.rank
else:
rank = 0
if rank != 0:
log_file_path = None
# Add handler for stdout and stderr messages
terminal_handler = logging.StreamHandler()
terminal_handler.setFormatter(TerminalFormatter())
if rank != 0:
terminal_handler.setLevel(logging.WARNING)
elif verbose:
terminal_handler.setLevel(logging.DEBUG)
else:
terminal_handler.setLevel(logging.INFO)
logging.root.addHandler(terminal_handler)
# Add file handler to root logger
if log_file_path is not None:
#file_format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
file_format = '%(asctime)s: %(message)s'
file_handler = logging.FileHandler(log_file_path)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(file_format))
logging.root.addHandler(file_handler)
# Do not handle logging.DEBUG at all if unnecessary
if log_file_path is not None:
logging.root.setLevel(logging.DEBUG)
else:
logging.root.setLevel(terminal_handler.level)
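# Typical use (mirrors main() below): configure logging to the terminal only, or to
# both terminal and a file when a path is given; the file name here is hypothetical.
#   config_root_logger(verbose=False)
#   config_root_logger(verbose=True, log_file_path='platform_comparison.log')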
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import os
import os.path
import sys
import math
try:
import openmm
import openmm.unit as units
except ImportError: # OpenMM < 7.6
import simtk.unit as units
import simtk.openmm as openmm
from openmmtools import testsystems
#=============================================================================================
# SUBROUTINES
#=============================================================================================
# These settings control what tolerance is allowed between platforms and the Reference platform.
ENERGY_TOLERANCE = 0.06*units.kilocalories_per_mole # energy difference tolerance
FORCE_RMSE_TOLERANCE = 0.06*units.kilocalories_per_mole/units.angstrom # per-particle force root-mean-square error tolerance
def assert_approximately_equal(computed_potential, expected_potential, tolerance=ENERGY_TOLERANCE):
"""
Check whether computed potential is acceptably close to expected value, using an error tolerance.
ARGUMENTS
computed_potential (openmm.unit.Quantity in units of energy) - computed potential energy
expected_potential (openmm.unit.Quantity in units of energy) - expected
OPTIONAL ARGUMENTS
tolerance (openmm.unit.Quantity in units of energy) - acceptable tolerance
EXAMPLES
>>> assert_approximately_equal(0.0000 * units.kilocalories_per_mole, 0.0001 * units.kilocalories_per_mole, tolerance=0.06*units.kilocalories_per_mole)
"""
# Compute error.
error = (computed_potential - expected_potential)
# Raise an exception if the error is larger than the tolerance.
if abs(error) > tolerance:
raise Exception("Computed potential %s, expected %s. Error %s is larger than acceptable tolerance of %s." % (computed_potential, expected_potential, error, tolerance))
return
def compute_potential_and_force(system, positions, platform):
"""
Compute the energy and force for the given system and positions in the designated platform.
ARGUMENTS
system (openmm.System) - the system for which the energy is to be computed
positions (openmm.unit.Quantity of Nx3 numpy.array in units of distance) - positions for which energy and force are to be computed
platform (openmm.Platform) - platform object to be used to compute the energy and force
RETURNS
potential (openmm.unit.Quantity in energy/mole) - the potential
force (openmm.unit.Quantity of Nx3 numpy.array in units of energy/mole/distance) - the force
"""
# Create a Context.
kB = units.BOLTZMANN_CONSTANT_kB
temperature = 298.0 * units.kelvin
kT = kB * temperature
beta = 1.0 / kT
collision_rate = 90.0 / units.picosecond
timestep = 1.0 * units.femtosecond
integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
context = openmm.Context(system, integrator, platform)
# Set positions
context.setPositions(positions)
# Evaluate the potential energy.
state = context.getState(getEnergy=True, getForces=True)
potential = state.getPotentialEnergy()
force = state.getForces(asNumpy=True)
return [potential, force]
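# Usage sketch (assumes openmmtools provides AlanineDipeptideImplicit and that the
# Reference platform is available): evaluate one test system on one platform.
#
#   testsystem = testsystems.AlanineDipeptideImplicit()
#   reference = openmm.Platform.getPlatformByName('Reference')
#   potential, force = compute_potential_and_force(testsystem.system,
#                                                  testsystem.positions, reference)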
def compute_potential_and_force_by_force_index(system, positions, platform, force_index):
"""
Compute the energy and force for the given system and positions in the designated platform for the given force index.
ARGUMENTS
system (openmm.System) - the system for which the energy is to be computed
positions (openmm.unit.Quantity of Nx3 numpy.array in units of distance) - positions for which energy and force are to be computed
platform (openmm.Platform) - platform object to be used to compute the energy and force
force_index (int) - index of force to be computed (all others ignored)
RETURNS
potential (openmm.unit.Quantity in energy/mole) - the potential
force (openmm.unit.Quantity of Nx3 numpy.array in units of energy/mole/distance) - the force
"""
forces = [ system.getForce(index) for index in range(system.getNumForces()) ]
# Get original force groups.
groups = [ force.getForceGroup() for force in forces ]
# Set force groups so only specified force_index contributes.
for force in forces:
force.setForceGroup(1)
forces[force_index].setForceGroup(0) # bitmask of 1 should select only desired force
# Create a Context.
kB = units.BOLTZMANN_CONSTANT_kB
temperature = 298.0 * units.kelvin
kT = kB * temperature
beta = 1.0 / kT
collision_rate = 90.0 / units.picosecond
timestep = 1.0 * units.femtosecond
integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
context = openmm.Context(system, integrator, platform)
# Set positions
context.setPositions(positions)
# Evaluate the potential energy.
state = context.getState(getEnergy=True, getForces=True, groups=1)
potential = state.getPotentialEnergy()
force = state.getForces(asNumpy=True)
# Restore original force groups.
for index in range(system.getNumForces()):
forces[index].setForceGroup(groups[index])
return [potential, force]
def compute_potential_and_force_by_force_group(system, positions, platform, force_group):
"""
Compute the energy and force for the given system and positions in the designated platform for the given force group.
ARGUMENTS
system (openmm.System) - the system for which the energy is to be computed
positions (openmm.unit.Quantity of Nx3 numpy.array in units of distance) - positions for which energy and force are to be computed
platform (openmm.Platform) - platform object to be used to compute the energy and force
force_group (int) - index of force group to be computed (all others ignored)
RETURNS
potential (openmm.unit.Quantity in energy/mole) - the potential
force (openmm.unit.Quantity of Nx3 numpy.array in units of energy/mole/distance) - the force
"""
forces = [ system.getForce(index) for index in range(system.getNumForces()) ]
# Create a Context.
kB = units.BOLTZMANN_CONSTANT_kB
temperature = 298.0 * units.kelvin
kT = kB * temperature
beta = 1.0 / kT
collision_rate = 90.0 / units.picosecond
timestep = 1.0 * units.femtosecond
integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
context = openmm.Context(system, integrator, platform)
# Set positions
context.setPositions(positions)
# Evaluate the potential energy.
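# The 'groups' bitmask selects which force groups contribute: bit n corresponds to
# force group n. main() assigns groups starting at 1, hence the shift by (force_group + 1).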
groupmask = 1 << (force_group + 1)
state = context.getState(getEnergy=True, getForces=True, groups=groupmask)
potential = state.getPotentialEnergy()
force = state.getForces(asNumpy=True)
return [potential, force]
def get_all_subclasses(cls):
"""
Return all subclasses of a specified class.
Parameters
----------
cls : class
The class for which all subclasses are to be returned.
Returns
-------
all_subclasses : list of class
List of all subclasses of `cls`.
"""
all_subclasses = []
for subclass in cls.__subclasses__():
all_subclasses.append(subclass)
all_subclasses.extend(get_all_subclasses(subclass))
return all_subclasses
#=============================================================================================
# MAIN AND TESTS
#=============================================================================================
def main():
import doctest
import argparse
parser = argparse.ArgumentParser(description="Check OpenMM computed energies and forces across all platforms for a suite of test systems.")
parser.add_argument('-o', '--outfile', dest='logfile', action='store', type=str, default=None)
parser.add_argument('-v', dest='verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose # Whether to display extra debug information.
config_root_logger(verbose, log_file_path=args.logfile)
# Print version.
logger.info("OpenMM version: %s" % openmm.version.version)
logger.info("")
# List all available platforms
logger.info("Available platforms:")
for platform_index in range(openmm.Platform.getNumPlatforms()):
platform = openmm.Platform.getPlatform(platform_index)
logger.info("%5d %s" % (platform_index, platform.getName()))
logger.info("")
# Test all systems on Reference platform.
platform = openmm.Platform.getPlatformByName("Reference")
print('Testing Reference platform...')
doctest.testmod()
# Compute energy error made on all test systems for other platforms.
# Make a count of how often set tolerance is exceeded.
tests_failed = 0 # number of times tolerance is exceeded
tests_passed = 0 # number of times tolerance is not exceeded
logger.info("%16s%16s %16s %16s %16s %16s" % ("platform", "precision", "potential", "error", "force mag", "rms error"))
reference_platform = openmm.Platform.getPlatformByName("Reference")
testsystem_classes = get_all_subclasses(testsystems.TestSystem)
for testsystem_class in testsystem_classes:
class_name = testsystem_class.__name__
try:
testsystem = testsystem_class()
except ImportError as e:
logger.info(e)
logger.info("Skipping %s due to missing dependency" % class_name)
continue
# Re-use the test system instance created above.
[system, positions] = [testsystem.system, testsystem.positions]
logger.info("%s (%d atoms)" % (class_name, testsystem.system.getNumParticles()))
# Compute reference potential and force
[reference_potential, reference_force] = compute_potential_and_force(system, positions, reference_platform)
# Test all platforms.
test_success = True
for platform_index in range(openmm.Platform.getNumPlatforms()):
try:
platform = openmm.Platform.getPlatform(platform_index)
platform_name = platform.getName()
# Define precision models to test.
if platform_name == 'Reference':
precision_models = ['double']
else:
precision_models = ['single']
if platform.supportsDoublePrecision():
precision_models.append('double')
for precision_model in precision_models:
# Set precision.
if platform_name == 'CUDA':
platform.setPropertyDefaultValue('CudaPrecision', precision_model)
if platform_name == 'OpenCL':
platform.setPropertyDefaultValue('OpenCLPrecision', precision_model)
# Compute potential and force.
[platform_potential, platform_force] = compute_potential_and_force(system, positions, platform)
# Compute error in potential.
potential_error = platform_potential - reference_potential
# Compute per-atom RMS (magnitude) and RMS error in force.
force_unit = units.kilocalories_per_mole / units.nanometers
natoms = system.getNumParticles()
force_mse = (((reference_force - platform_force) / force_unit)**2).sum() / natoms * force_unit**2
force_rmse = units.sqrt(force_mse)
force_ms = ((platform_force / force_unit)**2).sum() / natoms * force_unit**2
force_rms = units.sqrt(force_ms)
logger.info("%16s%16s %16.6f kcal/mol %16.6f kcal/mol %16.6f kcal/mol/nm %16.6f kcal/mol/nm" % (platform_name, precision_model, platform_potential / units.kilocalories_per_mole, potential_error / units.kilocalories_per_mole, force_rms / force_unit, force_rmse / force_unit))
# Mark whether tolerance is exceeded or not.
if abs(potential_error) > ENERGY_TOLERANCE:
test_success = False
logger.info("%32s WARNING: Potential energy error (%.6f kcal/mol) exceeds tolerance (%.6f kcal/mol). Test failed." % ("", potential_error/units.kilocalories_per_mole, ENERGY_TOLERANCE/units.kilocalories_per_mole))
if abs(force_rmse) > FORCE_RMSE_TOLERANCE:
test_success = False
logger.info("%32s WARNING: Force RMS error (%.6f kcal/mol/nm) exceeds tolerance (%.6f kcal/mol/nm). Test failed." % ("", force_rmse/force_unit, FORCE_RMSE_TOLERANCE/force_unit))
if verbose:
for atom_index in range(natoms):
# logger.info() does not accept an 'end' keyword, so build each row first.
row = " ".join(["%12.6f" % (reference_force[atom_index, k] / force_unit) for k in range(3)])
row += " : " + " ".join(["%12.6f" % (platform_force[atom_index, k] / force_unit) for k in range(3)])
logger.info(row)
except Exception as e:
logger.info(e)
if test_success:
tests_passed += 1
else:
tests_failed += 1
if (test_success is False):
# Write XML files of failed tests to aid in debugging.
logger.info("Writing failed test system to '%s'.{system,state}.xml ..." % testsystem.name)
[system_xml, state_xml] = testsystem.serialize()
xml_file = open(testsystem.name + '.system.xml', 'w')
xml_file.write(system_xml)
xml_file.close()
xml_file = open(testsystem.name + '.state.xml', 'w')
xml_file.write(state_xml)
xml_file.close()
# Place forces into different force groups.
forces = [ system.getForce(force_index) for force_index in range(system.getNumForces()) ]
force_group_names = dict()
group_index = 0
for force_index in range(system.getNumForces()):
force_name = forces[force_index].__class__.__name__
if force_name == 'NonbondedForce':
forces[force_index].setForceGroup(group_index+1)
force_group_names[group_index] = 'NonbondedForce (direct)'
group_index += 1
forces[force_index].setReciprocalSpaceForceGroup(group_index+1)
force_group_names[group_index] = 'NonbondedForce (reciprocal)'
group_index += 1
else:
forces[force_index].setForceGroup(group_index+1)
force_group_names[group_index] = force_name
group_index += 1
ngroups = len(force_group_names)
# Test by force group.
logger.info("Breakdown of discrepancies by Force component:")
nforces = system.getNumForces()
for force_group in range(ngroups):
force_name = force_group_names[force_group]
logger.info(force_name)
[reference_potential, reference_force] = compute_potential_and_force_by_force_group(system, positions, reference_platform, force_group)
logger.info("%16s%16s %16s %16s %16s %16s" % ("platform", "precision", "potential", "error", "force mag", "rms error"))
for platform_index in range(openmm.Platform.getNumPlatforms()):
try:
platform = openmm.Platform.getPlatform(platform_index)
platform_name = platform.getName()
# Define precision models to test.
if platform_name == 'Reference':
precision_models = ['double']
else:
precision_models = ['single']
if platform.supportsDoublePrecision():
precision_models.append('double')
for precision_model in precision_models:
# Set precision.
if platform_name == 'CUDA':
platform.setPropertyDefaultValue('CudaPrecision', precision_model)
if platform_name == 'OpenCL':
platform.setPropertyDefaultValue('OpenCLPrecision', precision_model)
# Compute potential and force.
[platform_potential, platform_force] = compute_potential_and_force_by_force_group(system, positions, platform, force_group)
# Compute error in potential.
potential_error = platform_potential - reference_potential
# Compute per-atom RMS (magnitude) and RMS error in force.
force_unit = units.kilocalories_per_mole / units.nanometers
natoms = system.getNumParticles()
force_mse = (((reference_force - platform_force) / force_unit)**2).sum() / natoms * force_unit**2
force_rmse = units.sqrt(force_mse)
force_ms = ((platform_force / force_unit)**2).sum() / natoms * force_unit**2
force_rms = units.sqrt(force_ms)
logger.info("%16s%16s %16.6f kcal/mol %16.6f kcal/mol %16.6f kcal/mol/nm %16.6f kcal/mol/nm" % (platform_name, precision_model, platform_potential / units.kilocalories_per_mole, potential_error / units.kilocalories_per_mole, force_rms / force_unit, force_rmse / force_unit))
except Exception as e:
logger.info(e)
pass
logger.info("")
logger.info("%d tests failed" % tests_failed)
logger.info("%d tests passed" % tests_passed)
if (tests_failed > 0):
# Signal failure of test.
sys.exit(1)
else:
sys.exit(0)
if __name__ == "__main__":
main()
|
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import allclose
from compas.geometry import argmax
from compas.geometry import axis_angle_vector_from_matrix
from compas.geometry import basis_vectors_from_matrix
from compas.geometry import cross_vectors
from compas.geometry import decompose_matrix
from compas.geometry import euler_angles_from_matrix
from compas.geometry import matrix_from_axis_angle_vector
from compas.geometry import matrix_from_basis_vectors
from compas.geometry import matrix_from_euler_angles
from compas.geometry import matrix_from_quaternion
from compas.geometry import quaternion_from_matrix
from compas.geometry import subtract_vectors
from compas.geometry import Transformation
from compas.geometry.primitives import Point
from compas.geometry.primitives import Primitive
from compas.geometry.primitives import Quaternion
from compas.geometry.primitives import Vector
class Frame(Primitive):
"""A frame is defined by a base point and two orthonormal base vectors.
Parameters
----------
point : [float, float, float] | :class:`~compas.geometry.Point`
The origin of the frame.
xaxis : [float, float, float] | :class:`~compas.geometry.Vector`
The x-axis of the frame.
yaxis : [float, float, float] | :class:`~compas.geometry.Vector`
The y-axis of the frame.
Attributes
----------
point : :class:`~compas.geometry.Point`
The base point of the frame.
xaxis : :class:`~compas.geometry.Vector`
The local X axis of the frame.
yaxis : :class:`~compas.geometry.Vector`
The local Y axis of the frame.
zaxis : :class:`~compas.geometry.Vector`, read-only
The Z axis of the frame.
normal : :class:`~compas.geometry.Vector`, read-only
The normal of the base plane of the frame.
quaternion : :class:`~compas.geometry.Quaternion`, read-only
The quaternion from the rotation given by the frame.
axis_angle_vector : :class:`~compas.geometry.Vector`, read-only
The axis-angle vector representing the rotation of the frame.
Notes
-----
All input vectors are orthonormalized when creating a frame, with the first
vector as starting point.
Examples
--------
>>> from compas.geometry import Point
>>> from compas.geometry import Vector
>>> f = Frame([0, 0, 0], [1, 0, 0], [0, 1, 0])
>>> f = Frame(Point(0, 0, 0), Vector(1, 0, 0), Vector(0, 1, 0))
"""
def __init__(self, point, xaxis, yaxis, **kwargs):
super(Frame, self).__init__(**kwargs)
self._point = None
self._xaxis = None
self._yaxis = None
self.point = point
self.xaxis = xaxis
self.yaxis = yaxis
# ==========================================================================
# data
# ==========================================================================
@property
def DATASCHEMA(self):
""":class:`schema.Schema` : Schema of the data representation."""
from schema import Schema
return Schema({
'point': Point.DATASCHEMA.fget(None),
'xaxis': Vector.DATASCHEMA.fget(None),
'yaxis': Vector.DATASCHEMA.fget(None)
})
@property
def JSONSCHEMANAME(self):
"""str : Name of the schema of the data representation in JSON format."""
return 'frame'
@property
def data(self):
"""dict : The data dictionary that represents the frame."""
return {'point': self.point.data,
'xaxis': self.xaxis.data,
'yaxis': self.yaxis.data}
@data.setter
def data(self, data):
self.point = Point.from_data(data['point'])
self.xaxis = Vector.from_data(data['xaxis'])
self.yaxis = Vector.from_data(data['yaxis'])
@classmethod
def from_data(cls, data):
"""Construct a frame from its data representation.
Parameters
----------
data : dict
The data dictionary.
Returns
-------
:class:`~compas.geometry.Frame`
The constructed frame.
Examples
--------
>>> data = {'point': [0.0, 0.0, 0.0], 'xaxis': [1.0, 0.0, 0.0], 'yaxis': [0.0, 1.0, 0.0]}
>>> frame = Frame.from_data(data)
>>> frame.point
Point(0.000, 0.000, 0.000)
>>> frame.xaxis
Vector(1.000, 0.000, 0.000)
>>> frame.yaxis
Vector(0.000, 1.000, 0.000)
"""
frame = cls(Point.from_data(data['point']), Vector.from_data(data['xaxis']), Vector.from_data(data['yaxis']))
return frame
# ==========================================================================
# properties
# ==========================================================================
@property
def point(self):
return self._point
@point.setter
def point(self, point):
self._point = Point(*point)
@property
def xaxis(self):
return self._xaxis
@xaxis.setter
def xaxis(self, vector):
xaxis = Vector(*vector)
xaxis.unitize()
self._xaxis = xaxis
@property
def yaxis(self):
return self._yaxis
@yaxis.setter
def yaxis(self, vector):
yaxis = Vector(*vector)
yaxis.unitize()
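# Re-orthogonalize: z = x × y gives the plane normal, and y = z × x is the
# component of the input y-axis perpendicular to x, so the stored axes stay
# orthonormal (see the class docstring).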
zaxis = Vector.cross(self.xaxis, yaxis)
zaxis.unitize()
self._yaxis = Vector.cross(zaxis, self.xaxis)
@property
def normal(self):
return Vector(*cross_vectors(self.xaxis, self.yaxis))
@property
def zaxis(self):
return self.normal
@property
def quaternion(self):
R = matrix_from_basis_vectors(self.xaxis, self.yaxis)
return Quaternion(*quaternion_from_matrix(R))
@property
def axis_angle_vector(self):
R = matrix_from_basis_vectors(self.xaxis, self.yaxis)
return Vector(*axis_angle_vector_from_matrix(R))
# ==========================================================================
# customization
# ==========================================================================
def __repr__(self):
return 'Frame({0!r}, {1!r}, {2!r})'.format(self.point, self.xaxis, self.yaxis)
def __len__(self):
return 3
def __getitem__(self, key):
if key == 0:
return self.point
if key == 1:
return self.xaxis
if key == 2:
return self.yaxis
raise KeyError
def __setitem__(self, key, value):
if key == 0:
self.point = value
return
if key == 1:
self.xaxis = value
return
if key == 2:
self.yaxis = value
return
raise KeyError
def __iter__(self):
return iter([self.point, self.xaxis, self.yaxis])
def __eq__(self, other, tol=1e-05):
if not hasattr(other, '__iter__') or not hasattr(other, '__len__') or len(self) != len(other):
return False
return allclose(self, other, tol=tol)
# ==========================================================================
# constructors
# ==========================================================================
@classmethod
def worldXY(cls):
"""Construct the world XY frame.
Returns
-------
:class:`~compas.geometry.Frame`
The world XY frame.
Examples
--------
>>> frame = Frame.worldXY()
>>> frame.point
Point(0.000, 0.000, 0.000)
>>> frame.xaxis
Vector(1.000, 0.000, 0.000)
>>> frame.yaxis
Vector(0.000, 1.000, 0.000)
"""
return cls([0, 0, 0], [1, 0, 0], [0, 1, 0])
@classmethod
def worldZX(cls):
"""Construct the world ZX frame.
Returns
-------
:class:`~compas.geometry.Frame`
The world ZX frame.
Examples
--------
>>> frame = Frame.worldZX()
>>> frame.point
Point(0.000, 0.000, 0.000)
>>> frame.xaxis
Vector(0.000, 0.000, 1.000)
>>> frame.yaxis
Vector(1.000, 0.000, 0.000)
"""
return cls([0, 0, 0], [0, 0, 1], [1, 0, 0])
@classmethod
def worldYZ(cls):
"""Construct the world YZ frame.
Returns
-------
:class:`~compas.geometry.Frame`
The world YZ frame.
Examples
--------
>>> frame = Frame.worldYZ()
>>> frame.point
Point(0.000, 0.000, 0.000)
>>> frame.xaxis
Vector(0.000, 1.000, 0.000)
>>> frame.yaxis
Vector(0.000, 0.000, 1.000)
"""
return cls([0, 0, 0], [0, 1, 0], [0, 0, 1])
@classmethod
def from_points(cls, point, point_xaxis, point_xyplane):
"""Constructs a frame from 3 points.
Parameters
----------
point : [float, float, float] | :class:`~compas.geometry.Point`
The origin of the frame.
point_xaxis : [float, float, float] | :class:`~compas.geometry.Point`
A point on the x-axis of the frame.
point_xyplane : [float, float, float] | :class:`~compas.geometry.Point`
A point within the xy-plane of the frame.
Returns
-------
:class:`~compas.geometry.Frame`
The constructed frame.
Examples
--------
>>> frame = Frame.from_points([0, 0, 0], [1, 0, 0], [0, 1, 0])
>>> frame.point
Point(0.000, 0.000, 0.000)
>>> frame.xaxis
Vector(1.000, 0.000, 0.000)
>>> frame.yaxis
Vector(0.000, 1.000, 0.000)
"""
xaxis = subtract_vectors(point_xaxis, point)
xyvec = subtract_vectors(point_xyplane, point)
yaxis = cross_vectors(cross_vectors(xaxis, xyvec), xaxis)
return cls(point, xaxis, yaxis)
@classmethod
def from_rotation(cls, rotation, point=[0, 0, 0]):
"""Constructs a frame from a Rotation.
Parameters
----------
rotation : :class:`~compas.geometry.Rotation`
The rotation defines the orientation of the frame.
point : [float, float, float] | :class:`~compas.geometry.Point`, optional
The origin of the frame.
Returns
-------
:class:`~compas.geometry.Frame`
The constructed frame.
Examples
--------
>>> from compas.geometry import Rotation
>>> f1 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> R = Rotation.from_frame(f1)
>>> f2 = Frame.from_rotation(R, point=f1.point)
>>> f1 == f2
True
"""
xaxis, yaxis = rotation.basis_vectors
return cls(point, xaxis, yaxis)
@classmethod
def from_transformation(cls, transformation):
"""Constructs a frame from a Transformation.
Parameters
----------
transformation : :class:`~compas.geometry.Transformation`
The transformation defines the orientation of the frame through the
rotation and the origin through the translation.
Returns
-------
:class:`~compas.geometry.Frame`
The constructed frame.
Examples
--------
>>> from compas.geometry import Transformation
>>> f1 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> T = Transformation.from_frame(f1)
>>> f2 = Frame.from_transformation(T)
>>> f1 == f2
True
"""
xaxis, yaxis = transformation.basis_vectors
point = transformation.translation_vector
return cls(point, xaxis, yaxis)
@classmethod
def from_matrix(cls, matrix):
"""Construct a frame from a matrix.
Parameters
----------
matrix : list[list[float]]
The 4x4 transformation matrix in row-major order.
Returns
-------
:class:`~compas.geometry.Frame`
The constructed frame.
Examples
--------
>>> from compas.geometry import matrix_from_euler_angles
>>> ea1 = [0.5, 0.4, 0.8]
>>> M = matrix_from_euler_angles(ea1)
>>> f = Frame.from_matrix(M)
>>> ea2 = f.euler_angles()
>>> allclose(ea1, ea2)
True
"""
_, _, angles, point, _ = decompose_matrix(matrix)
R = matrix_from_euler_angles(angles, static=True, axes='xyz')
xaxis, yaxis = basis_vectors_from_matrix(R)
return cls(point, xaxis, yaxis)
@classmethod
def from_list(cls, values):
"""Construct a frame from a list of 12 or 16 float values.
Parameters
----------
values : list[float]
The list of 12 or 16 values representing a 4x4 matrix.
Returns
-------
:class:`~compas.geometry.Frame`
The constructed frame.
Raises
------
ValueError
If the length of the list is neither 12 nor 16.
Notes
-----
Since the transformation matrix follows the row-major order, the
translational components must be at the list's indices 3, 7, 11.
Examples
--------
>>> l = [-1.0, 0.0, 0.0, 8110, 0.0, 0.0, -1.0, 7020, 0.0, -1.0, 0.0, 1810]
>>> f = Frame.from_list(l)
"""
if len(values) == 12:
values.extend([0., 0., 0., 1.])
if len(values) != 16:
raise ValueError(
'Expected 12 or 16 floats but got %d' %
len(values))
matrix = [[0. for i in range(4)] for j in range(4)]
for i in range(4):
for j in range(4):
matrix[i][j] = float(values[i * 4 + j])
return cls.from_matrix(matrix)
@classmethod
def from_quaternion(cls, quaternion, point=[0, 0, 0]):
"""Construct a frame from a rotation represented by quaternion coefficients.
Parameters
----------
quaternion : [float, float, float, float] | :class:`~compas.geometry.Quaternion`
Four numbers that represent the four coefficient values of a quaternion.
point : [float, float, float] | :class:`~compas.geometry.Point`, optional
The point of the frame.
Returns
-------
:class:`~compas.geometry.Frame`
The constructed frame.
Examples
--------
>>> q1 = [0.945, -0.021, -0.125, 0.303]
>>> f = Frame.from_quaternion(q1, point=[1., 1., 1.])
>>> q2 = f.quaternion
>>> allclose(q1, q2, tol=1e-03)
True
"""
R = matrix_from_quaternion(quaternion)
xaxis, yaxis = basis_vectors_from_matrix(R)
return cls(point, xaxis, yaxis)
@classmethod
def from_axis_angle_vector(cls, axis_angle_vector, point=[0, 0, 0]):
"""Construct a frame from an axis-angle vector representing the rotation.
Parameters
----------
axis_angle_vector : [float, float, float]
Three numbers that represent the axis of rotation and angle of
rotation by its magnitude.
point : [float, float, float] | :class:`~compas.geometry.Point`, optional
The point of the frame.
Returns
-------
:class:`~compas.geometry.Frame`
The constructed frame.
Examples
--------
>>> aav1 = [-0.043, -0.254, 0.617]
>>> f = Frame.from_axis_angle_vector(aav1, point=[0, 0, 0])
>>> aav2 = f.axis_angle_vector
>>> allclose(aav1, aav2)
True
"""
R = matrix_from_axis_angle_vector(axis_angle_vector)
xaxis, yaxis = basis_vectors_from_matrix(R)
return cls(point, xaxis, yaxis)
@classmethod
def from_euler_angles(cls, euler_angles, static=True, axes='xyz', point=[0, 0, 0]):
"""Construct a frame from a rotation represented by Euler angles.
Parameters
----------
euler_angles : [float, float, float]
Three numbers that represent the angles of rotations about the defined axes.
static : bool, optional
If True, the rotations are applied to a static frame.
If False, they are applied to a rotating frame.
axes : str, optional
A 3 character string specifying the order of the axes.
point : [float, float, float] | :class:`~compas.geometry.Point`, optional
The point of the frame.
Returns
-------
:class:`~compas.geometry.Frame`
The constructed frame.
Examples
--------
>>> ea1 = 1.4, 0.5, 2.3
>>> f = Frame.from_euler_angles(ea1, static=True, axes='xyz')
>>> ea2 = f.euler_angles(static=True, axes='xyz')
>>> allclose(ea1, ea2)
True
"""
R = matrix_from_euler_angles(euler_angles, static, axes)
xaxis, yaxis = basis_vectors_from_matrix(R)
return cls(point, xaxis, yaxis)
@classmethod
def from_plane(cls, plane):
"""Constructs a frame from a plane.
Xaxis and yaxis are arbitrarily selected based on the plane's normal.
Parameters
----------
plane : [point, vector] | :class:`~compas.geometry.Plane`
A plane.
Returns
-------
:class:`~compas.geometry.Frame`
The constructed frame.
Examples
--------
>>> from compas.geometry import Plane
>>> plane = Plane([0,0,0], [0,0,1])
>>> frame = Frame.from_plane(plane)
>>> allclose(frame.normal, plane.normal)
True
"""
point, normal = plane
# To construct a frame we need to find a vector v that is perpendicular
# to the plane's normal. This means that the dot-product of v with the
# normal must be equal to 0, which is true for the following vectors:
vectors = [Vector(-normal[1], normal[0], 0),
Vector(0, -normal[2], normal[1]),
Vector(normal[2], 0, -normal[0])]
# But if we are unlucky, one of these vectors is (0, 0, 0), so we
# choose the vector with the longest length as xaxis.
idx = argmax([v.length for v in vectors])
xaxis = vectors[idx]
yaxis = cross_vectors(normal, xaxis)
return cls(point, xaxis, yaxis)
# ==========================================================================
# static
# ==========================================================================
@staticmethod
def local_to_local_coordinates(frame1, frame2, object_in_frame1):
"""Returns the object's coordinates in frame1 in the local coordinates of frame2.
Parameters
----------
frame1 : [point, vector, vector] | :class:`~compas.geometry.Frame`
A frame representing one local coordinate system.
frame2 : [point, vector, vector] | :class:`~compas.geometry.Frame`
A frame representing another local coordinate system.
object_in_frame1 : [float, float, float] | :class:`~compas.geometry.Geometry`
An object in the coordinate frame1.
If you pass a list of float, it is assumed to represent a point.
Returns
-------
:class:`~compas.geometry.Geometry`
The object in the local coordinate system of frame2.
Examples
--------
>>> from compas.geometry import Point
>>> frame1 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> frame2 = Frame([2, 1, 3], [1., 0., 0.], [0., 1., 0.])
>>> p1 = Point(2, 2, 2) # point in frame1
>>> p2 = Frame.local_to_local_coordinates(frame1, frame2, p1) # point in frame2
>>> Frame.local_to_local_coordinates(frame2, frame1, p2)
Point(2.000, 2.000, 2.000)
"""
T = Transformation.from_change_of_basis(frame1, frame2)
if isinstance(object_in_frame1, (list, tuple)):
return Point(*object_in_frame1).transformed(T)
return object_in_frame1.transformed(T)
# ==========================================================================
# methods
# ==========================================================================
def euler_angles(self, static=True, axes='xyz'):
"""The Euler angles from the rotation given by the frame.
Parameters
----------
static : bool, optional
If True the rotations are applied to a static frame.
If False, they are applied to a rotating frame.
axes : str, optional
A 3 character string specifying the order of the axes.
Returns
-------
list[float]
Three numbers that represent the angles of rotations about the defined axes.
Examples
--------
>>> ea1 = 1.4, 0.5, 2.3
>>> f = Frame.from_euler_angles(ea1, static=True, axes='xyz')
>>> ea2 = f.euler_angles(static=True, axes='xyz')
>>> allclose(ea1, ea2)
True
"""
R = matrix_from_basis_vectors(self.xaxis, self.yaxis)
return euler_angles_from_matrix(R, static, axes)
def to_local_coordinates(self, obj_in_wcf):
"""Returns the object's coordinates in the local coordinate system of the frame.
Parameters
----------
obj_in_wcf : [float, float, float] | :class:`~compas.geometry.Geometry`
An object in the world coordinate frame.
Returns
-------
:class:`~compas.geometry.Geometry`
The object in the local coordinate system of the frame.
Notes
-----
If you pass a list of floats, it is assumed to represent a point.
Examples
--------
>>> from compas.geometry import Point
>>> frame = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> pw = Point(2, 2, 2) # point in wcf
>>> pl = frame.to_local_coordinates(pw) # point in frame
>>> frame.to_world_coordinates(pl)
Point(2.000, 2.000, 2.000)
"""
T = Transformation.from_change_of_basis(Frame.worldXY(), self)
if isinstance(obj_in_wcf, (list, tuple)):
return Point(*obj_in_wcf).transformed(T)
return obj_in_wcf.transformed(T)
def to_world_coordinates(self, obj_in_lcf):
"""Returns the object's coordinates in the global coordinate frame.
Parameters
----------
obj_in_lcf : [float, float, float] | :class:`~compas.geometry.Geometry`
An object in local coordinate system of the frame.
Returns
-------
:class:`~compas.geometry.Geometry`
The object in the world coordinate frame.
Notes
-----
If you pass a list of floats, it is assumed to represent a point.
Examples
--------
>>> from compas.geometry import Point
>>> frame = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> pl = Point(1.632, -0.090, 0.573) # point in frame
>>> pw = frame.to_world_coordinates(pl) # point in wcf
>>> frame.to_local_coordinates(pw)
Point(1.632, -0.090, 0.573)
"""
T = Transformation.from_change_of_basis(self, Frame.worldXY())
if isinstance(obj_in_lcf, list):
return Point(*obj_in_lcf).transformed(T)
return obj_in_lcf.transformed(T)
def transform(self, T):
"""Transform the frame.
Parameters
----------
T : :class:`~compas.geometry.Transformation`
The transformation.
Examples
--------
>>> from compas.geometry import Transformation
>>> f1 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> T = Transformation.from_frame(f1)
>>> f2 = Frame.worldXY()
>>> f2.transform(T)
>>> f1 == f2
True
"""
# replace this by function call
X = T * Transformation.from_frame(self)
point = X.translation_vector
xaxis, yaxis = X.basis_vectors
self.point = point
self.xaxis = xaxis
self.yaxis = yaxis
|
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import re
import sys
from multiprocessing import cpu_count
from catkin_tools.common import wide_log
import catkin_tools.execution.job_server as job_server
def add_context_args(parser):
"""Add common workspace and profile args to an argparse parser.
:param parser: The python argparse parser object (or subparser)
:type parser: ArgumentParser
"""
add = parser.add_argument
add_workspace_arg(parser)
add('--profile', default=None,
help='The name of a config profile to use (default: active profile)')
def add_workspace_arg(parser):
"""Add common workspace arg to an argparse parser.
:param parser: The python argparse parser object (or subparser)
:type parser: ArgumentParser
"""
add = parser.add_argument
add('--workspace', '-w', default=None,
help='The path to the catkin_tools workspace or a directory contained within it (default: ".")')
def add_cmake_and_make_and_catkin_make_args(parser):
"""Add common make and cmake args to an argparse parser.
:param parser: The python argparse parser object (or subparser)
:type parser: ArgumentParser
"""
add = parser.add_argument
add('-j', '--jobs', default=None, type=int,
help='Maximum number of build jobs to be distributed across active packages. (default is cpu count)')
add('-p', '--parallel-packages', metavar='PACKAGE_JOBS', dest='parallel_jobs', default=None, type=int,
help='Maximum number of packages allowed to be built in parallel (default is cpu count)')
add('-l', '--load-average', default=None, type=float,
help='Maximum load average before no new build jobs are scheduled')
# Deprecated flags kept for compatibility
add('--parallel-jobs', '--parallel', action='store_true', dest='parallel_jobs', help=argparse.SUPPRESS)
add = parser.add_mutually_exclusive_group().add_argument
add('--jobserver', dest='use_internal_make_jobserver', default=None, action='store_true',
help='Use the internal GNU Make job server which will limit the number '
'of Make jobs across all active packages.')
add('--no-jobserver', dest='use_internal_make_jobserver', default=None, action='store_false',
help='Disable the internal GNU Make job server, and use an external one (like distcc, for example).')
add = parser.add_mutually_exclusive_group().add_argument
add('--env-cache', dest='use_env_cache', default=None, action='store_true',
help='Re-use cached environment variables when re-sourcing a resultspace that has been '
'loaded at a different stage in the task.')
add('--no-env-cache', dest='use_env_cache', default=None, action='store_false',
help='Don\'t cache environment variables when re-sourcing the same resultspace.')
add = parser.add_mutually_exclusive_group().add_argument
add('--cmake-args', metavar='ARG', dest='cmake_args', nargs='+', required=False, type=str, default=None,
help='Arbitrary arguments which are passed to CMake. '
'It collects all of following arguments until a "--" is read.')
add('--no-cmake-args', dest='cmake_args', action='store_const', const=[], default=None,
help='Pass no additional arguments to CMake.')
add = parser.add_mutually_exclusive_group().add_argument
add('--make-args', metavar='ARG', dest='make_args', nargs='+', required=False, type=str, default=None,
help='Arbitrary arguments which are passed to make. '
'It collects all of following arguments until a "--" is read.')
add('--no-make-args', dest='make_args', action='store_const', const=[], default=None,
help='Pass no additional arguments to make (does not affect --catkin-make-args).')
add = parser.add_mutually_exclusive_group().add_argument
add('--catkin-make-args', metavar='ARG', dest='catkin_make_args',
nargs='+', required=False, type=str, default=None,
help='Arbitrary arguments which are passed to make but only for catkin packages. '
             'It collects all of the following arguments until a "--" is read.')
add('--no-catkin-make-args', dest='catkin_make_args', action='store_const', const=[], default=None,
help='Pass no additional arguments to make for catkin packages (does not affect --make-args).')
def split_arguments(args, splitter_name=None, splitter_index=None):
"""Split list of args into (other, split_args, other) between splitter_name/index and `--`
:param args: list of all arguments
:type args: list of str
:param splitter_name: optional argument used to split out specific args
:type splitter_name: str
:param splitter_index: specific index at which to split
:type splitter_index: int
    :returns: tuple (args before the splitter, collected split args, args after the terminating '--')
"""
if splitter_index is None:
if splitter_name not in args:
return args, []
splitter_index = args.index(splitter_name)
start_index = splitter_index + 1
end_index = args.index('--', start_index) if '--' in args[start_index:] else None
if end_index:
return (
args[0:splitter_index],
args[start_index:end_index],
args[(end_index + 1):]
)
else:
return (
args[0:splitter_index],
args[start_index:],
[]
)
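# Illustrative sketch (not part of the original interface; argument values are
# made up): split_arguments partitions a command line around a splitter flag
# and an optional terminating '--', e.g.
#   split_arguments(['build', '--make-args', '-j4', '--', 'pkg'],
#                   splitter_name='--make-args')
#   # -> (['build'], ['-j4'], ['pkg'])
#   split_arguments(['build', '--make-args', '-j4'], splitter_name='--make-args')
#   # -> (['build'], ['-j4'], [])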
def _extract_cmake_and_make_arguments(args, extract_catkin_make):
"""Extract arguments which are meant to be passed to CMake and GNU Make
through the catkin_tools command line interface.
:param args: system arguments from which special arguments need to be extracted
:type args: list
:returns: tuple of separate args, cmake_args, make args, and catkin make args
:rtype: tuple
"""
cmake_args = []
make_args = []
catkin_make_args = []
arg_types = {}
if '--no-cmake-args' not in args:
arg_types['--cmake-args'] = cmake_args
if '--no-make-args' not in args:
arg_types['--make-args'] = make_args
    if '--no-catkin-make-args' not in args and extract_catkin_make:
arg_types['--catkin-make-args'] = catkin_make_args
# Get the splitter indexes for each type (multiples allowed) starting at the end
ordered_splitters = reversed([
(i, t)
for i, t in enumerate(args)
if t in arg_types
])
# Extract explicit specific args
head_args = args
tail_args = []
for index, name in ordered_splitters:
# Update whole args list, get specific args
head_args, specific, tail = split_arguments(head_args, splitter_index=index)
tail_args.extend(tail)
arg_types[name][0:0] = specific
args = head_args + tail_args
# classify -D* and -G* arguments as cmake specific arguments
if '--cmake-args' in arg_types:
implicit_cmake_args = [a for a in args if a.startswith('-D') or a.startswith('-G')]
args = [a for a in args if a not in implicit_cmake_args]
cmake_args = implicit_cmake_args + cmake_args
if '--no-cmake-args' not in args and len(cmake_args) == 0:
cmake_args = None
if '--no-make-args' not in args and len(make_args) == 0:
make_args = None
if '--no-catkin-make-args' not in args and len(catkin_make_args) == 0 and extract_catkin_make:
catkin_make_args = None
return args, cmake_args, make_args, catkin_make_args
def extract_cmake_and_make_and_catkin_make_arguments(args):
"""Extracts cmake, make, and catkin specific make arguments from given system arguments
:param args: system arguments from which special arguments need to be extracted
:type args: list
:returns: tuple of separate args, cmake_args, make args, and catkin make args
:rtype: tuple
"""
return _extract_cmake_and_make_arguments(args, extract_catkin_make=True)
def extract_cmake_and_make_arguments(args):
"""Extracts cmake and make arguments from the given system arguments
:param args: system arguments from which special arguments need to be extracted
:type args: list
:returns: tuple of separate args, cmake_args, and make_args
:rtype: tuple
"""
args, cmake_args, make_args, _ = _extract_cmake_and_make_arguments(args, extract_catkin_make=False)
return args, cmake_args, make_args
def extract_jobs_flags_values(mflags):
"""Gets the values of the make jobs flags
:param mflags: string of space separated make arguments
:type mflags: str
:returns: dictionary mapping jobs flags to jobs flags values
:rtype: dict
"""
    # Only flags that actually appear in mflags are added to this dict
    jobs_dict = {}
# These regular expressions use (?P<name>...) for named capture groups
# (^|\s) and (?=$|\s) make sure that the flag is surrounded by whitespace
regex = r'(^|\s)(-j\s*|--jobs(=|\s+))(?P<jobs>\d*)(?=$|\s)'
for m in re.finditer(regex, mflags):
if m.group('jobs'):
jobs_dict['jobs'] = int(m.group('jobs'))
regex = r'(^|\s)(-l\s*|--load-average(=|\s+))(?P<load>\d*\.?\d*)(?=$|\s)'
for m in re.finditer(regex, mflags):
if m.group('load'):
jobs_dict['load-average'] = float(m.group('load'))
return jobs_dict
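# Illustrative examples (hypothetical inputs) of the value extraction above:
#   extract_jobs_flags_values('-j8 -l2.5')  # -> {'jobs': 8, 'load-average': 2.5}
#   extract_jobs_flags_values('--jobs=4')   # -> {'jobs': 4}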
def extract_jobs_flags(mflags):
"""Extracts make job flags from a list of other make flags, i.e. -j8 -l8
:param mflags: string of space separated make arguments
:type mflags: str
:returns: list of make jobs flags
:rtype: list
"""
if not mflags:
return []
# Each line matches a flag type, i.e. -j, -l, --jobs, --load-average
# (?:^|\s) and (?=$|\s) make sure that the flag is surrounded by whitespace
# (?:...) is just a group that will not be captured, this is necessary because the whole flag should be captured
# The upper two expressions are simple, they just match the flag, optional whitespace and an optional number
    # The bottom two expressions are more complicated because the long flag may be
    # followed by '=' and a number, whitespace and a number, or nothing at all
regex = r'(?:^|\s)(-j\s*\d*)(?=$|\s)|' + \
r'(?:^|\s)(-l\s*\d*\.?\d*)(?=$|\s)|' + \
r'(?:^|\s)(--jobs(?:(?:=|\s+)\d+)?)(?=$|\s)|' + \
r'(?:^|\s)(--load-average(?:(?:=|\s+)\d*\.?\d+)?)(?=$|\s)'
filtered_flags = []
for match in re.findall(regex, mflags):
filtered_flags.extend([m.strip() for m in match if m])
return filtered_flags or None
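# Illustrative examples (hypothetical inputs): the flags are returned verbatim
# so they can later be stripped from, or re-appended to, an argument string:
#   extract_jobs_flags('-j8 --load-average=2 -k')  # -> ['-j8', '--load-average=2']
#   extract_jobs_flags('make all')                 # -> None (no jobs flags found)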
def handle_make_arguments(
input_make_args,
force_single_threaded_when_running_tests=False):
"""Special handling for make arguments.
If force_single_threaded_when_running_tests is True, jobs flags are
replaced with -j1, because tests cannot handle parallelization.
If no job flags are present and there are none in the MAKEFLAGS environment
variable, then make flags are set to the cpu_count, e.g. -j4 -l4.
:param input_make_args: list of make arguments to be handled
:type input_make_args: list
:param force_single_threaded_when_running_tests: self explanatory
:type force_single_threaded_when_running_tests: bool
:returns: copied list of make arguments, potentially with some modifications
:rtype: list
"""
make_args = list(input_make_args)
# Get the values for the jobs flags which may be in the make args
jobs_dict = extract_jobs_flags_values(' '.join(make_args))
jobs_args = extract_jobs_flags(' '.join(make_args))
if jobs_args:
# Remove jobs flags from cli args if they're present
make_args = re.sub(' '.join(jobs_args), '', ' '.join(make_args)).split()
if force_single_threaded_when_running_tests:
# force single threaded execution when running test since rostest does not support multiple parallel runs
run_tests = [a for a in make_args if a.startswith('run_tests')]
if run_tests:
wide_log('Forcing "-j1" for running unit tests.')
jobs_dict['jobs'] = 1
if job_server.gnu_make_enabled():
make_args.extend(job_server.gnu_make_args())
else:
if 'jobs' in jobs_dict:
make_args.append('-j{0}'.format(jobs_dict['jobs']))
if 'load-average' in jobs_dict:
make_args.append('-l{0}'.format(jobs_dict['load-average']))
return make_args
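# Illustrative sketch (hypothetical values, assuming the internal GNU Make
# jobserver is disabled): a 'run_tests' target forces single-threaded builds,
# so handle_make_arguments(['run_tests', '-j4'], True) would strip '-j4' and
# return ['run_tests', '-j1'].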
def configure_make_args(make_args, jobs_args, use_internal_make_jobserver):
"""Initialize the internal GNU Make jobserver or configure it as a pass-through
:param make_args: arguments to be passed to GNU Make
:type make_args: list
    :param jobs_args: explicit jobs flags (e.g. -j4) passed on the command line
    :type jobs_args: list
    :param use_internal_make_jobserver: if true, use the internal jobserver
    :type use_internal_make_jobserver: bool
:rtype: tuple (final make_args, using makeflags, using cliflags, using jobserver)
"""
# Configure default jobs options: use all CPUs in each package
try:
        # NOTE: this will yield greater than 100% CPU utilization
n_cpus = cpu_count()
jobs_flags = {
'jobs': n_cpus,
'load-average': n_cpus + 1}
except NotImplementedError:
# If the number of cores cannot be determined, limit to one job
jobs_flags = {
'jobs': 1,
'load-average': 1}
# Get MAKEFLAGS from environment
makeflags_jobs_flags = extract_jobs_flags(os.environ.get('MAKEFLAGS', ''))
    using_makeflags_jobs_flags = bool(makeflags_jobs_flags)
if using_makeflags_jobs_flags:
makeflags_jobs_flags_dict = extract_jobs_flags_values(' '.join(makeflags_jobs_flags))
jobs_flags.update(makeflags_jobs_flags_dict)
# Extract make jobs flags (these override MAKEFLAGS)
cli_jobs_flags = jobs_args
using_cli_flags = len(cli_jobs_flags) > 0
if cli_jobs_flags:
jobs_flags.update(extract_jobs_flags_values(' '.join(cli_jobs_flags)))
# Remove jobs flags from cli args if they're present
make_args = re.sub(' '.join(cli_jobs_flags), '', ' '.join(make_args)).split()
# Instantiate the jobserver
job_server.initialize(
max_jobs=jobs_flags.get('jobs', None),
max_load=jobs_flags.get('load-average', None),
gnu_make_enabled=use_internal_make_jobserver)
# If the jobserver is supported
if job_server.gnu_make_enabled():
jobs_args = []
else:
jobs_args = cli_jobs_flags
return make_args + jobs_args, using_makeflags_jobs_flags, using_cli_flags, job_server.gnu_make_enabled()
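# Illustrative sketch (hypothetical 4-core machine, no jobs flags in MAKEFLAGS
# or on the command line): configure_make_args(['-k'], [], True) initializes
# the job server with max_jobs=4 and max_load=5; when the internal GNU Make
# jobserver is usable, no explicit -j/-l flags are re-appended to the make args.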
def argument_preprocessor(args):
"""Perform processing of argument patterns which are not captured by
argparse, before being passed to argparse
:param args: system arguments from which special arguments need to be extracted
:type args: list
    :returns: a tuple containing a list of the arguments which can be handled
by argparse and a dict of the extra arguments which this function has
extracted
:rtype: tuple
"""
# CMake/make pass-through flags collect dashed options. They require special
# handling or argparse will complain about unrecognized options.
# NOTE: http://bugs.python.org/issue9334
args = sys.argv[1:] if args is None else args
extract_make_args = extract_cmake_and_make_and_catkin_make_arguments
args, cmake_args, make_args, catkin_make_args = extract_make_args(args)
# Extract make jobs flags (these override MAKEFLAGS later on)
jobs_args = extract_jobs_flags(' '.join(args))
if jobs_args:
# Remove jobs flags from cli args if they're present
args = re.sub(' '.join(jobs_args), '', ' '.join(args)).split()
elif make_args is not None:
jobs_args = extract_jobs_flags(' '.join(make_args))
if jobs_args:
# Remove jobs flags from cli args if they're present
make_args = re.sub(' '.join(jobs_args), '', ' '.join(make_args)).split()
extras = {
'cmake_args': cmake_args,
'make_args': make_args,
'jobs_args': jobs_args,
'catkin_make_args': catkin_make_args,
}
return args, extras
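# Illustrative sketch (hypothetical command line): for
#   argument_preprocessor(['build', '--cmake-args', '-DCMAKE_BUILD_TYPE=Release',
#                          '--', '-j4'])
# the pass-through and jobs flags end up in the extras dict rather than in the
# argparse-visible arguments:
#   args   -> ['build']
#   extras -> {'cmake_args': ['-DCMAKE_BUILD_TYPE=Release'], 'make_args': None,
#              'jobs_args': ['-j4'], 'catkin_make_args': None}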
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common utilities for OpenStack
"""
from libcloud.utils.py3 import ET
from libcloud.utils.py3 import httplib
from libcloud.common.base import ConnectionUserAndKey, Response
from libcloud.common.types import ProviderError
from libcloud.compute.types import (LibcloudError, MalformedResponseError)
from libcloud.compute.types import KeyPairDoesNotExistError
from libcloud.common.openstack_identity import get_class_for_auth_version
# Imports for backward compatibility reasons
from libcloud.common.openstack_identity import (OpenStackServiceCatalog,
OpenStackIdentityTokenScope)
try:
import simplejson as json
except ImportError:
import json
AUTH_API_VERSION = '1.1'
# Auth versions which contain token expiration information.
AUTH_VERSIONS_WITH_EXPIRES = [
'1.1',
'2.0',
'2.0_apikey',
'2.0_password',
'3.x',
'3.x_password'
]
__all__ = [
'OpenStackBaseConnection',
'OpenStackResponse',
'OpenStackException',
'OpenStackDriverMixin'
]
class OpenStackBaseConnection(ConnectionUserAndKey):
"""
Base class for OpenStack connections.
:param user_id: User name to use when authenticating
:type user_id: ``str``
:param key: Secret to use when authenticating.
:type key: ``str``
:param secure: Use HTTPS? (True by default.)
:type secure: ``bool``
:param ex_force_base_url: Base URL for connection requests. If
not specified, this will be determined by
authenticating.
:type ex_force_base_url: ``str``
:param ex_force_auth_url: Base URL for authentication requests.
:type ex_force_auth_url: ``str``
:param ex_force_auth_version: Authentication version to use. If
not specified, defaults to AUTH_API_VERSION.
:type ex_force_auth_version: ``str``
:param ex_force_auth_token: Authentication token to use for connection
requests. If specified, the connection will
not attempt to authenticate, and the value
of ex_force_base_url will be used to
determine the base request URL. If
ex_force_auth_token is passed in,
ex_force_base_url must also be provided.
:type ex_force_auth_token: ``str``
:param token_scope: Whether to scope a token to a "project", a
"domain" or "unscoped".
:type token_scope: ``str``
:param ex_domain_name: When authenticating, provide this domain name to
the identity service. A scoped token will be
returned. Some cloud providers require the domain
name to be provided at authentication time. Others
will use a default domain if none is provided.
:type ex_domain_name: ``str``
:param ex_tenant_name: When authenticating, provide this tenant name to the
identity service. A scoped token will be returned.
Some cloud providers require the tenant name to be
provided at authentication time. Others will use a
default tenant if none is provided.
:type ex_tenant_name: ``str``
    :param ex_force_service_type: Service type to use when selecting a
service. If not specified, a provider
specific default will be used.
:type ex_force_service_type: ``str``
    :param ex_force_service_name: Service name to use when selecting a
service. If not specified, a provider
specific default will be used.
:type ex_force_service_name: ``str``
    :param ex_force_service_region: Region to use when selecting a service.
If not specified, a provider specific
default will be used.
:type ex_force_service_region: ``str``
"""
auth_url = None
auth_token = None
auth_token_expires = None
auth_user_info = None
service_catalog = None
service_type = None
service_name = None
service_region = None
accept_format = None
_auth_version = None
def __init__(self, user_id, key, secure=True,
host=None, port=None, timeout=None, proxy_url=None,
ex_force_base_url=None,
ex_force_auth_url=None,
ex_force_auth_version=None,
ex_force_auth_token=None,
ex_token_scope=OpenStackIdentityTokenScope.PROJECT,
ex_domain_name='Default',
ex_tenant_name=None,
ex_force_service_type=None,
ex_force_service_name=None,
ex_force_service_region=None,
retry_delay=None, backoff=None):
super(OpenStackBaseConnection, self).__init__(
user_id, key, secure=secure, timeout=timeout,
retry_delay=retry_delay, backoff=backoff, proxy_url=proxy_url)
if ex_force_auth_version:
self._auth_version = ex_force_auth_version
self._ex_force_base_url = ex_force_base_url
self._ex_force_auth_url = ex_force_auth_url
self._ex_force_auth_token = ex_force_auth_token
self._ex_token_scope = ex_token_scope
self._ex_domain_name = ex_domain_name
self._ex_tenant_name = ex_tenant_name
self._ex_force_service_type = ex_force_service_type
self._ex_force_service_name = ex_force_service_name
self._ex_force_service_region = ex_force_service_region
self._osa = None
if ex_force_auth_token and not ex_force_base_url:
raise LibcloudError(
'Must also provide ex_force_base_url when specifying '
'ex_force_auth_token.')
if ex_force_auth_token:
self.auth_token = ex_force_auth_token
if not self._auth_version:
self._auth_version = AUTH_API_VERSION
auth_url = self._get_auth_url()
if not auth_url:
raise LibcloudError('OpenStack instance must ' +
'have auth_url set')
def get_auth_class(self):
"""
Retrieve identity / authentication class instance.
:rtype: :class:`OpenStackIdentityConnection`
"""
if not self._osa:
auth_url = self._get_auth_url()
cls = get_class_for_auth_version(auth_version=self._auth_version)
self._osa = cls(auth_url=auth_url,
user_id=self.user_id,
key=self.key,
tenant_name=self._ex_tenant_name,
domain_name=self._ex_domain_name,
token_scope=self._ex_token_scope,
timeout=self.timeout,
parent_conn=self)
return self._osa
def request(self, action, params=None, data='', headers=None,
method='GET', raw=False):
headers = headers or {}
params = params or {}
# Include default content-type for POST and PUT request (if available)
default_content_type = getattr(self, 'default_content_type', None)
if method.upper() in ['POST', 'PUT'] and default_content_type:
            headers['Content-Type'] = default_content_type
return super(OpenStackBaseConnection, self).request(action=action,
params=params,
data=data,
method=method,
headers=headers,
raw=raw)
def _get_auth_url(self):
"""
Retrieve auth url for this instance using either "ex_force_auth_url"
        constructor kwarg or "auth_url" class variable.
"""
auth_url = self.auth_url
if self._ex_force_auth_url is not None:
auth_url = self._ex_force_auth_url
return auth_url
def get_service_catalog(self):
if self.service_catalog is None:
self._populate_hosts_and_request_paths()
return self.service_catalog
def get_service_name(self):
"""
Gets the service name used to look up the endpoint in the service
catalog.
:return: name of the service in the catalog
"""
if self._ex_force_service_name:
return self._ex_force_service_name
return self.service_name
def get_endpoint(self):
"""
Selects the endpoint to use based on provider specific values,
or overrides passed in by the user when setting up the driver.
:returns: url of the relevant endpoint for the driver
"""
service_type = self.service_type
service_name = self.service_name
service_region = self.service_region
if self._ex_force_service_type:
service_type = self._ex_force_service_type
if self._ex_force_service_name:
service_name = self._ex_force_service_name
if self._ex_force_service_region:
service_region = self._ex_force_service_region
endpoint = self.service_catalog.get_endpoint(service_type=service_type,
name=service_name,
region=service_region)
url = endpoint.url
if not url:
raise LibcloudError('Could not find specified endpoint')
return url
def add_default_headers(self, headers):
headers['X-Auth-Token'] = self.auth_token
headers['Accept'] = self.accept_format
return headers
def morph_action_hook(self, action):
self._populate_hosts_and_request_paths()
return super(OpenStackBaseConnection, self).morph_action_hook(action)
def _set_up_connection_info(self, url):
result = self._tuple_from_url(url)
(self.host, self.port, self.secure, self.request_path) = result
self.connect()
def _populate_hosts_and_request_paths(self):
"""
OpenStack uses a separate host for API calls which is only provided
after an initial authentication request.
"""
osa = self.get_auth_class()
if self._ex_force_auth_token:
# If ex_force_auth_token is provided we always hit the api directly
# and never try to authenticate.
#
# Note: When ex_force_auth_token is provided, ex_force_base_url
# must be provided as well.
self._set_up_connection_info(url=self._ex_force_base_url)
return
if not osa.is_token_valid():
# Token is not available or it has expired. Need to retrieve a
# new one.
if self._auth_version == '2.0_apikey':
kwargs = {'auth_type': 'api_key'}
elif self._auth_version == '2.0_password':
kwargs = {'auth_type': 'password'}
else:
kwargs = {}
osa = osa.authenticate(**kwargs) # may throw InvalidCreds
self.auth_token = osa.auth_token
self.auth_token_expires = osa.auth_token_expires
self.auth_user_info = osa.auth_user_info
# Pull out and parse the service catalog
osc = OpenStackServiceCatalog(service_catalog=osa.urls,
auth_version=self._auth_version)
self.service_catalog = osc
url = self._ex_force_base_url or self.get_endpoint()
self._set_up_connection_info(url=url)
class OpenStackException(ProviderError):
pass
class OpenStackResponse(Response):
node_driver = None
def success(self):
i = int(self.status)
return i >= 200 and i <= 299
def has_content_type(self, content_type):
content_type_value = self.headers.get('content-type') or ''
content_type_value = content_type_value.lower()
return content_type_value.find(content_type.lower()) > -1
def parse_body(self):
if self.status == httplib.NO_CONTENT or not self.body:
return None
if self.has_content_type('application/xml'):
try:
return ET.XML(self.body)
            except Exception:
raise MalformedResponseError(
'Failed to parse XML',
body=self.body,
driver=self.node_driver)
elif self.has_content_type('application/json'):
try:
return json.loads(self.body)
            except Exception:
raise MalformedResponseError(
'Failed to parse JSON',
body=self.body,
driver=self.node_driver)
else:
return self.body
def parse_error(self):
text = None
body = self.parse_body()
if self.has_content_type('application/xml'):
text = '; '.join([err.text or '' for err in body.getiterator()
if err.text])
elif self.has_content_type('application/json'):
values = list(body.values())
context = self.connection.context
driver = self.connection.driver
key_pair_name = context.get('key_pair_name', None)
if len(values) > 0 and values[0]['code'] == 404 and key_pair_name:
raise KeyPairDoesNotExistError(name=key_pair_name,
driver=driver)
elif len(values) > 0 and 'message' in values[0]:
text = ';'.join([fault_data['message'] for fault_data
in values])
else:
text = body
else:
            # While we hope a response is always XML or JSON, we have seen
            # HTML or plain text in the past. It's not clear we can do much to
            # make it more readable here, so we just pass the whole response
            # body along in the text variable.
text = body
return '%s %s %s' % (self.status, self.error, text)
class OpenStackDriverMixin(object):
def __init__(self, *args, **kwargs):
self._ex_force_base_url = kwargs.get('ex_force_base_url', None)
self._ex_force_auth_url = kwargs.get('ex_force_auth_url', None)
self._ex_force_auth_version = kwargs.get('ex_force_auth_version', None)
self._ex_force_auth_token = kwargs.get('ex_force_auth_token', None)
self._ex_token_scope = kwargs.get('ex_token_scope', None)
self._ex_domain_name = kwargs.get('ex_domain_name', None)
self._ex_tenant_name = kwargs.get('ex_tenant_name', None)
self._ex_force_service_type = kwargs.get('ex_force_service_type', None)
self._ex_force_service_name = kwargs.get('ex_force_service_name', None)
self._ex_force_service_region = kwargs.get('ex_force_service_region',
None)
def openstack_connection_kwargs(self):
"""
:rtype: ``dict``
"""
rv = {}
if self._ex_force_base_url:
rv['ex_force_base_url'] = self._ex_force_base_url
if self._ex_force_auth_token:
rv['ex_force_auth_token'] = self._ex_force_auth_token
if self._ex_force_auth_url:
rv['ex_force_auth_url'] = self._ex_force_auth_url
if self._ex_force_auth_version:
rv['ex_force_auth_version'] = self._ex_force_auth_version
if self._ex_token_scope:
rv['ex_token_scope'] = self._ex_token_scope
if self._ex_domain_name:
rv['ex_domain_name'] = self._ex_domain_name
if self._ex_tenant_name:
rv['ex_tenant_name'] = self._ex_tenant_name
if self._ex_force_service_type:
rv['ex_force_service_type'] = self._ex_force_service_type
if self._ex_force_service_name:
rv['ex_force_service_name'] = self._ex_force_service_name
if self._ex_force_service_region:
rv['ex_force_service_region'] = self._ex_force_service_region
return rv
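# Illustrative sketch (credentials and endpoint are made up): drivers that mix
# in OpenStackDriverMixin accept these ex_* keyword arguments and forward them
# through openstack_connection_kwargs() to OpenStackBaseConnection, e.g.:
#
#   from libcloud.compute.types import Provider
#   from libcloud.compute.providers import get_driver
#
#   OpenStack = get_driver(Provider.OPENSTACK)
#   driver = OpenStack('demo', 'secret',
#                      ex_force_auth_url='http://keystone.example.org:5000',
#                      ex_force_auth_version='2.0_password',
#                      ex_tenant_name='demo')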
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import glob
import heapq
import logging
import os
import os.path
import re
import shutil
import subprocess
import sys
import tempfile
import time
from catapult_base import support_binaries
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.internal.backends import browser_backend
from telemetry.internal.backends.chrome import chrome_browser_backend
from telemetry.internal.util import path
def ParseCrashpadDateTime(date_time_str):
# Python strptime does not support time zone parsing, strip it.
date_time_parts = date_time_str.split()
if len(date_time_parts) >= 3:
date_time_str = ' '.join(date_time_parts[:2])
return datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
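# Illustrative example (made-up timestamp): crashpad reports include a trailing
# time zone that strptime cannot parse, so
#   ParseCrashpadDateTime('2015-08-06 16:15:29 PDT')
# returns datetime.datetime(2015, 8, 6, 16, 15, 29).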
class DesktopBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
"""The backend for controlling a locally-executed browser instance, on Linux,
Mac or Windows.
"""
def __init__(self, desktop_platform_backend, browser_options, executable,
flash_path, is_content_shell, browser_directory,
output_profile_path, extensions_to_load):
super(DesktopBrowserBackend, self).__init__(
desktop_platform_backend,
supports_tab_control=not is_content_shell,
supports_extensions=not is_content_shell,
browser_options=browser_options,
output_profile_path=output_profile_path,
extensions_to_load=extensions_to_load)
# Initialize fields so that an explosion during init doesn't break in Close.
self._proc = None
self._tmp_profile_dir = None
self._tmp_output_file = None
self._executable = executable
if not self._executable:
raise Exception('Cannot create browser, no executable found!')
assert not flash_path or os.path.exists(flash_path)
self._flash_path = flash_path
self._is_content_shell = is_content_shell
if len(extensions_to_load) > 0 and is_content_shell:
raise browser_backend.ExtensionsNotSupportedException(
'Content shell does not support extensions.')
self._browser_directory = browser_directory
self._port = None
self._tmp_minidump_dir = tempfile.mkdtemp()
self._crash_service = None
self._SetupProfile()
def _SetupProfile(self):
if not self.browser_options.dont_override_profile:
if self._output_profile_path:
self._tmp_profile_dir = self._output_profile_path
else:
self._tmp_profile_dir = tempfile.mkdtemp()
profile_dir = self.browser_options.profile_dir
if profile_dir:
assert self._tmp_profile_dir != profile_dir
if self._is_content_shell:
logging.critical('Profiles cannot be used with content shell')
sys.exit(1)
logging.info("Using profile directory:'%s'." % profile_dir)
shutil.rmtree(self._tmp_profile_dir)
shutil.copytree(profile_dir, self._tmp_profile_dir)
if self.browser_options.use_devtools_active_port:
# No matter whether we're using an existing profile directory or
# creating a new one, always delete the well-known file containing
# the active DevTools port number.
port_file = self._GetDevToolsActivePortPath()
if os.path.isfile(port_file):
try:
os.remove(port_file)
except Exception as e:
logging.critical('Unable to remove DevToolsActivePort file: %s' % e)
sys.exit(1)
def _GetDevToolsActivePortPath(self):
return os.path.join(self.profile_directory, 'DevToolsActivePort')
def _GetCrashServicePipeName(self):
# Ensure a unique pipe name by using the name of the temp dir.
return r'\\.\pipe\%s_service' % os.path.basename(self._tmp_minidump_dir)
def _StartCrashService(self):
os_name = self.browser.platform.GetOSName()
if os_name != 'win':
return None
arch_name = self.browser.platform.GetArchName()
command = support_binaries.FindPath('crash_service', arch_name, os_name)
if not command:
logging.warning('crash_service.exe not found for %s %s',
arch_name, os_name)
return None
return subprocess.Popen([
command,
'--no-window',
'--dumps-dir=%s' % self._tmp_minidump_dir,
'--pipe-name=%s' % self._GetCrashServicePipeName()])
def _GetCdbPath(self):
possible_paths = (
'Debugging Tools For Windows',
'Debugging Tools For Windows (x86)',
'Debugging Tools For Windows (x64)',
os.path.join('Windows Kits', '8.0', 'Debuggers', 'x86'),
os.path.join('Windows Kits', '8.0', 'Debuggers', 'x64'),
os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
'x86'),
os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
'x64'),
)
for possible_path in possible_paths:
app_path = os.path.join(possible_path, 'cdb.exe')
app_path = path.FindInstalledWindowsApplication(app_path)
if app_path:
return app_path
return None
def HasBrowserFinishedLaunching(self):
# In addition to the functional check performed by the base class, quickly
# check if the browser process is still alive.
if not self.IsBrowserRunning():
raise exceptions.ProcessGoneException(
"Return code: %d" % self._proc.returncode)
if self.browser_options.use_devtools_active_port:
# The Telemetry user selected the new code path to start DevTools on
# an ephemeral port. Wait for the well-known file containing the port
# number to exist.
port_file = self._GetDevToolsActivePortPath()
if not os.path.isfile(port_file):
# File isn't ready yet. Return false. Will retry.
return False
# Attempt to avoid reading the file until it's populated.
got_port = False
try:
if os.stat(port_file).st_size > 0:
with open(port_file) as f:
port_string = f.read()
self._port = int(port_string)
logging.info('Discovered ephemeral port %s' % self._port)
got_port = True
except Exception:
# Both stat and open can throw exceptions.
pass
if not got_port:
# File isn't ready yet. Return false. Will retry.
return False
return super(DesktopBrowserBackend, self).HasBrowserFinishedLaunching()
def GetBrowserStartupArgs(self):
args = super(DesktopBrowserBackend, self).GetBrowserStartupArgs()
if self.browser_options.use_devtools_active_port:
self._port = 0
else:
self._port = util.GetUnreservedAvailableLocalPort()
logging.info('Requested remote debugging port: %d' % self._port)
args.append('--remote-debugging-port=%i' % self._port)
args.append('--enable-crash-reporter-for-testing')
if not self._is_content_shell:
args.append('--window-size=1280,1024')
if self._flash_path:
args.append('--ppapi-flash-path=%s' % self._flash_path)
if not self.browser_options.dont_override_profile:
args.append('--user-data-dir=%s' % self._tmp_profile_dir)
return args
def Start(self):
assert not self._proc, 'Must call Close() before Start()'
args = [self._executable]
args.extend(self.GetBrowserStartupArgs())
if self.browser_options.startup_url:
args.append(self.browser_options.startup_url)
env = os.environ.copy()
env['CHROME_HEADLESS'] = '1' # Don't upload minidumps.
env['BREAKPAD_DUMP_LOCATION'] = self._tmp_minidump_dir
env['CHROME_BREAKPAD_PIPE_NAME'] = self._GetCrashServicePipeName()
self._crash_service = self._StartCrashService()
logging.debug('Starting Chrome %s', args)
if not self.browser_options.show_stdout:
self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
self._proc = subprocess.Popen(
args, stdout=self._tmp_output_file, stderr=subprocess.STDOUT, env=env)
else:
self._proc = subprocess.Popen(args, env=env)
try:
self._WaitForBrowserToComeUp()
self._InitDevtoolsClientBackend()
if self._supports_extensions:
self._WaitForExtensionsToLoad()
except:
self.Close()
raise
@property
def pid(self):
if self._proc:
return self._proc.pid
return None
@property
def browser_directory(self):
return self._browser_directory
@property
def profile_directory(self):
return self._tmp_profile_dir
def IsBrowserRunning(self):
    return self._proc and self._proc.poll() is None
def GetStandardOutput(self):
if not self._tmp_output_file:
if self.browser_options.show_stdout:
# This can happen in the case that loading the Chrome binary fails.
# We print rather than using logging here, because that makes a
# recursive call to this function.
print >> sys.stderr, "Can't get standard output with --show-stdout"
return ''
self._tmp_output_file.flush()
try:
with open(self._tmp_output_file.name) as f:
return f.read()
except IOError:
return ''
def _GetMostRecentCrashpadMinidump(self):
os_name = self.browser.platform.GetOSName()
arch_name = self.browser.platform.GetArchName()
crashpad_database_util = support_binaries.FindPath(
'crashpad_database_util', arch_name, os_name)
if not crashpad_database_util:
return None
report_output = subprocess.check_output([
crashpad_database_util, '--database=' + self._tmp_minidump_dir,
'--show-pending-reports', '--show-completed-reports',
'--show-all-report-info'])
last_indentation = -1
reports_list = []
report_dict = {}
for report_line in report_output.splitlines():
# Report values are grouped together by the same indentation level.
current_indentation = 0
for report_char in report_line:
if not report_char.isspace():
break
current_indentation += 1
# Decrease in indentation level indicates a new report is being printed.
if current_indentation >= last_indentation:
report_key, report_value = report_line.split(':', 1)
if report_value:
report_dict[report_key.strip()] = report_value.strip()
elif report_dict:
try:
report_time = ParseCrashpadDateTime(report_dict['Creation time'])
report_path = report_dict['Path'].strip()
reports_list.append((report_time, report_path))
except (ValueError, KeyError) as e:
logging.warning('Crashpad report expected valid keys'
' "Path" and "Creation time": %s', e)
finally:
report_dict = {}
last_indentation = current_indentation
# Include the last report.
if report_dict:
try:
report_time = ParseCrashpadDateTime(report_dict['Creation time'])
report_path = report_dict['Path'].strip()
reports_list.append((report_time, report_path))
except (ValueError, KeyError) as e:
logging.warning('Crashpad report expected valid keys'
' "Path" and "Creation time": %s', e)
if reports_list:
_, most_recent_report_path = max(reports_list)
return most_recent_report_path
return None
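  # For reference (the exact format is an assumption inferred from the parsing
  # above, not a guarantee): crashpad_database_util groups report fields by
  # indentation, roughly:
  #
  #   <report uuid>:
  #     Path: <path to minidump>
  #     Creation time: 2015-08-06 16:15:29 PDT
  #
  # Each dedent flushes the accumulated fields into reports_list as a
  # (creation time, path) tuple, and the newest report is returned.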
def _GetMostRecentMinidump(self):
# Crashpad dump layout will be the standard eventually, check it first.
most_recent_dump = self._GetMostRecentCrashpadMinidump()
# Typical breakpad format is simply dump files in a folder.
if not most_recent_dump:
dumps = glob.glob(os.path.join(self._tmp_minidump_dir, '*.dmp'))
if dumps:
most_recent_dump = heapq.nlargest(1, dumps, os.path.getmtime)[0]
# As a sanity check, make sure the crash dump is recent.
if (most_recent_dump and
os.path.getmtime(most_recent_dump) < (time.time() - (5 * 60))):
logging.warning('Crash dump is older than 5 minutes. May not be correct.')
return most_recent_dump
def _IsExecutableStripped(self):
if self.browser.platform.GetOSName() == 'mac':
symbols = subprocess.check_output(['/usr/bin/nm', self._executable])
num_symbols = len(symbols.splitlines())
# We assume that if there are more than 10 symbols the executable is not
# stripped.
return num_symbols < 10
else:
return False
def _GetStackFromMinidump(self, minidump):
os_name = self.browser.platform.GetOSName()
if os_name == 'win':
cdb = self._GetCdbPath()
if not cdb:
logging.warning('cdb.exe not found.')
return None
output = subprocess.check_output([cdb, '-y', self._browser_directory,
'-c', '.ecxr;k30;q', '-z', minidump])
# cdb output can start the stack with "ChildEBP", "Child-SP", and possibly
# other things we haven't seen yet. If we can't find the start of the
# stack, include output from the beginning.
stack_start = 0
stack_start_match = re.search("^Child(?:EBP|-SP)", output, re.MULTILINE)
if stack_start_match:
stack_start = stack_start_match.start()
stack_end = output.find('quit:')
return output[stack_start:stack_end]
arch_name = self.browser.platform.GetArchName()
stackwalk = support_binaries.FindPath(
'minidump_stackwalk', arch_name, os_name)
if not stackwalk:
logging.warning('minidump_stackwalk binary not found.')
return None
with open(minidump, 'rb') as infile:
minidump += '.stripped'
with open(minidump, 'wb') as outfile:
outfile.write(''.join(infile.read().partition('MDMP')[1:]))
symbols_path = os.path.join(self._tmp_minidump_dir, 'symbols')
symbols = glob.glob(os.path.join(self._browser_directory, '*.breakpad*'))
if symbols:
for symbol in sorted(symbols, key=os.path.getmtime, reverse=True):
if not os.path.isfile(symbol):
continue
with open(symbol, 'r') as f:
fields = f.readline().split()
if not fields:
continue
sha = fields[3]
binary = ' '.join(fields[4:])
symbol_path = os.path.join(symbols_path, binary, sha)
if os.path.exists(symbol_path):
continue
os.makedirs(symbol_path)
shutil.copyfile(symbol, os.path.join(symbol_path, binary + '.sym'))
else:
# On some platforms generating the symbol table can be very time
# consuming, skip it if there's nothing to dump.
if self._IsExecutableStripped():
logging.info('%s appears to be stripped, skipping symbol dump.' % (
self._executable))
return
logging.info('Dumping breakpad symbols.')
generate_breakpad_symbols_path = os.path.join(
util.GetChromiumSrcDir(), "components", "crash",
"tools", "generate_breakpad_symbols.py")
cmd = [
sys.executable,
generate_breakpad_symbols_path,
'--binary=%s' % self._executable,
'--symbols-dir=%s' % symbols_path,
'--build-dir=%s' % self._browser_directory,
]
try:
subprocess.check_output(cmd, stderr=open(os.devnull, 'w'))
except subprocess.CalledProcessError:
logging.warning('Failed to execute "%s"' % ' '.join(cmd))
return None
return subprocess.check_output([stackwalk, minidump, symbols_path],
stderr=open(os.devnull, 'w'))
def GetStackTrace(self):
most_recent_dump = self._GetMostRecentMinidump()
if not most_recent_dump:
return 'No crash dump found. Returning browser stdout:\n' + (
self.GetStandardOutput())
logging.info('minidump found: %s' % most_recent_dump)
stack = self._GetStackFromMinidump(most_recent_dump)
if not stack:
return 'Failed to symbolize minidump. Returning browser stdout:\n' + (
self.GetStandardOutput())
return stack
def __del__(self):
self.Close()
def _TryCooperativeShutdown(self):
if self.browser.platform.IsCooperativeShutdownSupported():
# Ideally there would be a portable, cooperative shutdown
# mechanism for the browser. This seems difficult to do
# correctly for all embedders of the content API. The only known
# problem with unclean shutdown of the browser process is on
# Windows, where suspended child processes frequently leak. For
# now, just solve this particular problem. See Issue 424024.
if self.browser.platform.CooperativelyShutdown(self._proc, "chrome"):
try:
util.WaitFor(lambda: not self.IsBrowserRunning(), timeout=5)
logging.info('Successfully shut down browser cooperatively')
except exceptions.TimeoutException as e:
logging.warning('Failed to cooperatively shutdown. ' +
'Proceeding to terminate: ' + str(e))
def Close(self):
super(DesktopBrowserBackend, self).Close()
if self.IsBrowserRunning():
self._TryCooperativeShutdown()
# Shutdown politely if the profile may be used again.
if self._output_profile_path and self.IsBrowserRunning():
self._proc.terminate()
try:
util.WaitFor(lambda: not self.IsBrowserRunning(), timeout=5)
self._proc = None
except exceptions.TimeoutException:
logging.warning('Failed to gracefully shutdown. Proceeding to kill.')
# Shutdown aggressively if the above failed or if the profile is temporary.
if self.IsBrowserRunning():
self._proc.kill()
self._proc = None
if self._crash_service:
self._crash_service.kill()
self._crash_service = None
if self._output_profile_path:
# If we need the output then double check that it exists.
if not (self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir)):
raise Exception("No profile directory generated by Chrome: '%s'." %
self._tmp_profile_dir)
else:
# If we don't need the profile after the run then cleanup.
if self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir):
shutil.rmtree(self._tmp_profile_dir, ignore_errors=True)
self._tmp_profile_dir = None
if self._tmp_output_file:
self._tmp_output_file.close()
self._tmp_output_file = None
|
|
import pytest
from mock import Mock, patch, call
from ramses import generators
from .fixtures import engine_mock
class TestGenerationHelpers(object):
@patch('ramses.models.get_existing_model')
def test_setup_data_model_existing_model(self, mock_get):
mock_get.return_value = 1
model, auth_model = generators.setup_data_model('foo', 'Bar')
assert not auth_model
assert model == 1
mock_get.assert_called_once_with('Bar')
@patch('ramses.generators.resource_schema')
@patch('ramses.models.get_existing_model')
def test_setup_data_model_no_schema(self, mock_get, mock_schema):
mock_get.return_value = None
mock_schema.return_value = None
with pytest.raises(Exception) as ex:
generators.setup_data_model('foo', 'Bar')
assert str(ex.value) == 'Missing schema for model `Bar`'
mock_get.assert_called_once_with('Bar')
mock_schema.assert_called_once_with('foo')
@patch('ramses.generators.resource_schema')
@patch('ramses.models.generate_model_cls')
@patch('ramses.models.get_existing_model')
def test_setup_data_model_success(self, mock_get, mock_gen, mock_schema):
mock_get.return_value = None
mock_schema.return_value = {'field1': 'val1'}
model = generators.setup_data_model('foo', 'Bar')
mock_get.assert_called_once_with('Bar')
mock_schema.assert_called_once_with('foo')
mock_gen.assert_called_once_with(
schema={'field1': 'val1'},
model_name='Bar',
raml_resource='foo')
assert model == mock_gen()
@patch('ramses.generators.setup_data_model')
def test_handle_model_generation_value_err(self, mock_set):
mock_set.side_effect = ValueError('strange error')
with pytest.raises(ValueError) as ex:
generators.handle_model_generation('foo', '/stories')
assert str(ex.value) == 'Story: strange error'
mock_set.assert_called_once_with('foo', 'Story')
@patch('ramses.generators.setup_data_model')
def test_handle_model_generation(self, mock_set):
mock_set.return_value = ('Foo1', True)
model, auth_model = generators.handle_model_generation(
'foo', '/stories')
mock_set.assert_called_once_with('foo', 'Story')
assert model == 'Foo1'
assert auth_model
@patch('ramses.generators.configure_resources')
def test_generate_server(self, mock_conf):
parsed_raml = Mock(resources={'/users': 1})
config = Mock()
generators.generate_server(parsed_raml, config)
mock_conf.assert_called_once_with(
config=config, raml_resources={'/users': 1},
parsed_raml=parsed_raml)
class TestGenerateModels(object):
@patch('ramses.generators.is_dynamic_uri')
def test_no_resources(self, mock_dyn):
generators.generate_models(config=1, raml_resources={})
assert not mock_dyn.called
@patch('ramses.generators.handle_model_generation')
def test_dynamic_uri(self, mock_handle):
generators.generate_models(
config=1, raml_resources={'/{id}': Mock(resources={})})
assert not mock_handle.called
@patch('ramses.generators.attr_subresource')
@patch('ramses.generators.handle_model_generation')
def test_attr_subresource(self, mock_handle, mock_attr):
mock_attr.return_value = True
resource = Mock(resources={})
generators.generate_models(
config=1, raml_resources={'/stories': resource})
assert not mock_handle.called
mock_attr.assert_called_once_with(resource, 'stories')
@patch('ramses.generators.attr_subresource')
@patch('ramses.generators.handle_model_generation')
def test_non_auth_model(self, mock_handle, mock_attr):
mock_attr.return_value = False
mock_handle.return_value = ('Foo', False)
config = Mock()
resource = Mock(resources={})
generators.generate_models(
config=config, raml_resources={'/stories': resource})
mock_attr.assert_called_once_with(resource, 'stories')
mock_handle.assert_called_once_with(resource, 'stories')
assert config.registry.auth_model != 'Foo'
@patch('ramses.generators.attr_subresource')
@patch('ramses.generators.handle_model_generation')
def test_auth_model(self, mock_handle, mock_attr):
mock_attr.return_value = False
mock_handle.return_value = ('Foo', True)
config = Mock()
resource = Mock(resources={})
generators.generate_models(
config=config, raml_resources={'/stories': resource})
mock_attr.assert_called_once_with(resource, 'stories')
mock_handle.assert_called_once_with(resource, 'stories')
assert config.registry.auth_model == 'Foo'
@patch('ramses.generators.attr_subresource')
@patch('ramses.generators.handle_model_generation')
def test_recursion(self, mock_handle, mock_attr):
mock_attr.return_value = False
mock_handle.return_value = ('Foo', False)
resource1 = Mock(resources={})
resource2 = Mock(resources={'/users': resource1})
generators.generate_models(
config='', raml_resources={'/stories': resource2})
mock_handle.assert_has_calls([
call(resource2, 'stories'),
call(resource1, 'users'),
])
class TestConfigureResources(object):
@patch('ramses.generators.is_restful_uri')
def test_no_raml_resources(self, mock_rest):
config = Mock()
generators.configure_resources(
config, raml_resources={}, parsed_raml='',
parent_resource=None)
assert not config.get_root_resource.called
assert not mock_rest.called
def test_no_parent_not_restful_uri(self):
config = Mock()
with pytest.raises(ValueError) as ex:
generators.configure_resources(
config, raml_resources={'/foo/bar': ''},
parsed_raml='', parent_resource=None)
expected = 'Resource URI `/foo/bar` is not RESTful'
assert str(ex.value) == expected
config.get_root_resource.assert_called_once_with()
@patch('ramses.generators.singular_subresource')
def test_root_dynamic_resource(self, mock_singular):
config = Mock()
resource = Mock(resource={})
with pytest.raises(Exception) as ex:
generators.configure_resources(
config, raml_resources={'/{id}': resource},
parsed_raml='', parent_resource=None)
assert "Top-level resources can't be dynamic" in str(ex.value)
assert not mock_singular.called
@patch('ramses.generators.singular_subresource')
def test_dynamic_resource(self, mock_singular):
resource = Mock(resources={})
parent_resource = Mock(uid=1)
generators.configure_resources(
None, raml_resources={'/{id}': resource},
parsed_raml='', parent_resource=parent_resource)
assert not mock_singular.called
@patch('ramses.generators.singular_subresource')
@patch('ramses.generators.attr_subresource')
@patch('ramses.models.get_existing_model')
@patch('ramses.generators.generate_acl')
@patch('ramses.generators.resource_view_attrs')
@patch('ramses.generators.generate_rest_view')
def test_full_run(
self, generate_view, view_attrs, generate_acl, get_model,
attr_res, singular_res):
model_cls = Mock()
model_cls.pk_field.return_value = 'my_id'
attr_res.return_value = False
singular_res.return_value = False
get_model.return_value = model_cls
config = Mock()
resource = Mock(resources={})
parent_resource = Mock(uid=1)
generators.configure_resources(
config, raml_resources={'/stories': resource},
parsed_raml='foo', parent_resource=parent_resource)
singular_res.assert_called_once_with(resource, 'stories')
attr_res.assert_called_once_with(resource, 'stories')
get_model.assert_called_once_with('Story')
generate_acl.assert_called_once_with(
context_cls=model_cls,
raml_resource=resource,
parsed_raml='foo'
)
view_attrs.assert_called_once_with(resource, False)
generate_view.assert_called_once_with(
model_cls=model_cls,
attrs=view_attrs(),
attr_view=False,
singular=False
)
parent_resource.add.assert_called_once_with(
'story', 'stories',
id_name='stories_my_id',
factory=generate_acl(),
view=generate_view()
)
@patch('ramses.generators.singular_subresource')
@patch('ramses.generators.attr_subresource')
@patch('ramses.models.get_existing_model')
@patch('ramses.generators.generate_acl')
@patch('ramses.generators.resource_view_attrs')
@patch('ramses.generators.generate_rest_view')
def test_full_run_singular(
self, generate_view, view_attrs, generate_acl, get_model,
attr_res, singular_res):
attr_res.return_value = False
singular_res.return_value = True
config = Mock()
resource = Mock(resources={})
parent_resource = Mock(uid=1)
parent_resource.view._model_class.pk_field.return_value = 'other_id'
generators.configure_resources(
config, raml_resources={'/stories': resource},
parsed_raml='foo', parent_resource=parent_resource)
singular_res.assert_called_once_with(resource, 'stories')
attr_res.assert_called_once_with(resource, 'stories')
get_model.assert_called_once_with('Story')
generate_acl.assert_called_once_with(
context_cls=parent_resource.view._model_class,
raml_resource=resource,
parsed_raml='foo'
)
view_attrs.assert_called_once_with(resource, True)
generate_view.assert_called_once_with(
model_cls=parent_resource.view._model_class,
attrs=view_attrs(),
attr_view=False,
singular=True
)
parent_resource.add.assert_called_once_with(
'story',
factory=generate_acl(),
view=generate_view()
)
assert generate_view()._singular_model == get_model()
|
|
from __future__ import unicode_literals
import logging
import time
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db.models import Q
from django.http import (Http404,
HttpResponse,
HttpResponseNotFound,
HttpResponseNotModified,
HttpResponseRedirect)
from django.shortcuts import (get_object_or_404, get_list_or_404,
render_to_response)
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils import six, timezone
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils.http import http_date
from django.utils.safestring import mark_safe
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.dates import get_latest_timestamp
from djblets.util.decorators import augment_method_from
from djblets.util.http import (encode_etag, set_last_modified,
set_etag, etag_if_none_match)
from reviewboard.accounts.decorators import (check_login_required,
valid_prefs_required)
from reviewboard.accounts.models import ReviewRequestVisit, Profile
from reviewboard.attachments.models import (FileAttachment,
get_latest_file_attachments)
from reviewboard.avatars import avatar_services
from reviewboard.diffviewer.diffutils import (convert_to_unicode,
get_file_chunks_in_range,
get_last_header_before_line,
get_last_line_number_in_diff,
get_original_file,
get_patched_file)
from reviewboard.diffviewer.models import DiffSet
from reviewboard.diffviewer.views import (DiffFragmentView,
DiffViewerView,
DownloadPatchErrorBundleView,
exception_traceback_string)
from reviewboard.hostingsvcs.bugtracker import BugTracker
from reviewboard.reviews.ui.screenshot import LegacyScreenshotReviewUI
from reviewboard.reviews.context import (comment_counts,
diffsets_with_comments,
has_comments_in_diffsets_excluding,
interdiffs_with_comments,
make_review_request_context)
from reviewboard.reviews.detail import (ChangeEntry,
InitialStatusUpdatesEntry,
ReviewEntry,
ReviewRequestPageData)
from reviewboard.reviews.features import status_updates_feature
from reviewboard.reviews.markdown_utils import is_rich_text_default_for_user
from reviewboard.reviews.models import (Comment,
Review,
ReviewRequest,
Screenshot)
from reviewboard.reviews.ui.base import FileAttachmentReviewUI
from reviewboard.scmtools.errors import FileNotFoundError
from reviewboard.scmtools.models import Repository
from reviewboard.site.decorators import check_local_site_access
from reviewboard.site.urlresolvers import local_site_reverse
#
# Helper functions
#
def _render_permission_denied(
request,
template_name='reviews/review_request_permission_denied.html'):
"""Renders a Permission Denied error for this review request."""
response = render_to_response(template_name, RequestContext(request))
response.status_code = 403
return response
def _find_review_request_object(review_request_id, local_site):
"""Finds a review request given an ID and an optional LocalSite name.
If a local site is passed in on the URL, we want to look up the review
request using the local_id instead of the pk. This allows each LocalSite
configured to have its own review request ID namespace starting from 1.
"""
q = ReviewRequest.objects.all()
if local_site:
q = q.filter(local_site=local_site,
local_id=review_request_id)
else:
q = q.filter(pk=review_request_id)
try:
q = q.select_related('submitter', 'repository')
return q.get()
except ReviewRequest.DoesNotExist:
raise Http404
def _find_review_request(request, review_request_id, local_site):
"""Finds a review request matching an ID, checking user access permissions.
If the review request is accessible by the user, we return
(ReviewRequest, None). Otherwise, we return (None, response).
"""
review_request = _find_review_request_object(review_request_id, local_site)
if review_request.is_accessible_by(request.user):
return review_request, None
else:
return None, _render_permission_denied(request)
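# Illustrative usage (mirrors the pattern used by the view functions below):
#   review_request, response = _find_review_request(
#       request, review_request_id, local_site)
#   if not review_request:
#       return response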
def _query_for_diff(review_request, user, revision, draft):
"""
Queries for a diff based on several parameters.
    If a matching diff does not exist, this raises an Http404 exception.
"""
# Normalize the revision, since it might come in as a string.
if revision:
revision = int(revision)
    # Try to use the draft's diff if the review request has an associated
    # draft and that diff is either the revision being requested or no
    # specific revision is being requested.
if (draft and draft.diffset_id and
(revision is None or draft.diffset.revision == revision)):
return draft.diffset
query = Q(history=review_request.diffset_history_id)
# Grab a revision if requested.
if revision is not None:
query = query & Q(revision=revision)
try:
return DiffSet.objects.filter(query).latest()
except DiffSet.DoesNotExist:
raise Http404
def _get_social_page_image_url(file_attachments):
"""Return the URL to an image used for social media sharing.
This will look for the first attachment in a list of attachments that can
be used to represent the review request on social media sites and chat
services. If a suitable attachment is found, its URL will be returned.
Args:
file_attachments (list of reviewboard.attachments.models.FileAttachment):
A list of file attachments used on a review request.
Returns:
unicode:
The URL to the first image file attachment, if found, or ``None``
if no suitable attachments were found.
"""
for file_attachment in file_attachments:
if file_attachment.mimetype.startswith('image/'):
return file_attachment.get_absolute_url()
return None
def build_diff_comment_fragments(
comments, context,
comment_template_name='reviews/diff_comment_fragment.html',
error_template_name='diffviewer/diff_fragment_error.html',
lines_of_context=None,
show_controls=False):
comment_entries = []
had_error = False
siteconfig = SiteConfiguration.objects.get_current()
if lines_of_context is None:
lines_of_context = [0, 0]
for comment in comments:
try:
max_line = get_last_line_number_in_diff(context, comment.filediff,
comment.interfilediff)
first_line = max(1, comment.first_line - lines_of_context[0])
last_line = min(comment.last_line + lines_of_context[1], max_line)
num_lines = last_line - first_line + 1
chunks = list(get_file_chunks_in_range(context,
comment.filediff,
comment.interfilediff,
first_line,
num_lines))
content = render_to_string(comment_template_name, {
'comment': comment,
'header': get_last_header_before_line(context,
comment.filediff,
comment.interfilediff,
first_line),
'chunks': chunks,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get('site_domain_method'),
'lines_of_context': lines_of_context,
'expandable_above': show_controls and first_line != 1,
'expandable_below': show_controls and last_line != max_line,
'collapsible': lines_of_context != [0, 0],
'lines_above': first_line - 1,
'lines_below': max_line - last_line,
'first_line': first_line,
})
except Exception as e:
content = exception_traceback_string(
None, e, error_template_name, {
'comment': comment,
'file': {
'depot_filename': comment.filediff.source_file,
'index': None,
'filediff': comment.filediff,
},
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
})
# It's bad that we failed, and we'll return a 500, but we'll
# still return content for anything we have. This will prevent any
# caching.
had_error = True
chunks = []
comment_entries.append({
'comment': comment,
'html': content,
'chunks': chunks,
})
return had_error, comment_entries
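# Illustrative note (shape of the return value, inferred from the code above):
# build_diff_comment_fragments returns (had_error, comment_entries), where each
# entry is a dict of the form
#   {'comment': <Comment>, 'html': <rendered fragment>, 'chunks': [...]}
# so callers can still render whatever fragments succeeded even when had_error
# is True.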
#
# View functions
#
@check_login_required
@valid_prefs_required
def root(request, local_site_name=None):
"""Handles the root URL of Review Board or a Local Site.
If the user is authenticated, this will redirect to their Dashboard.
Otherwise, they'll be redirected to the All Review Requests page.
Either page may then redirect for login or show a Permission Denied,
depending on the settings.
"""
if request.user.is_authenticated():
url_name = 'dashboard'
else:
url_name = 'all-review-requests'
return HttpResponseRedirect(
local_site_reverse(url_name, local_site_name=local_site_name))
@login_required
@check_local_site_access
def new_review_request(request,
local_site=None,
template_name='reviews/new_review_request.html'):
"""Displays the New Review Request UI.
This handles the creation of a review request based on either an existing
changeset or the provided information.
"""
valid_repos = []
repos = Repository.objects.accessible(request.user, local_site=local_site)
if local_site:
local_site_name = local_site.name
else:
local_site_name = ''
for repo in repos.order_by('name'):
try:
scmtool = repo.get_scmtool()
valid_repos.append({
'id': repo.id,
'name': repo.name,
'scmtool_name': scmtool.name,
'supports_post_commit': repo.supports_post_commit,
'local_site_name': local_site_name,
'files_only': False,
'requires_change_number': scmtool.supports_pending_changesets,
'requires_basedir': not scmtool.diffs_use_absolute_paths,
})
except Exception:
logging.exception('Error loading SCMTool for repository "%s" '
'(ID %d)',
repo.name, repo.id)
valid_repos.insert(0, {
'id': '',
'name': _('(None - File attachments only)'),
'scmtool_name': '',
'supports_post_commit': False,
'files_only': True,
'local_site_name': local_site_name,
})
return render_to_response(template_name, RequestContext(request, {
'repos': valid_repos,
}))
@check_login_required
@check_local_site_access
def review_detail(request,
review_request_id,
local_site=None,
template_name='reviews/review_detail.html'):
"""Render the main review request page."""
review_request, response = _find_review_request(
request, review_request_id, local_site)
status_updates_enabled = status_updates_feature.is_enabled(
local_site=local_site)
if not review_request:
return response
data = ReviewRequestPageData(review_request, request)
data.query_data_pre_etag()
visited = None
last_visited = 0
starred = False
if request.user.is_authenticated():
try:
visited, visited_is_new = \
ReviewRequestVisit.objects.get_or_create(
user=request.user, review_request=review_request)
last_visited = visited.timestamp.replace(tzinfo=utc)
except ReviewRequestVisit.DoesNotExist:
# Somehow, this visit was seen as created but then not
# accessible. We need to log this and then continue on.
logging.error('Unable to get or create ReviewRequestVisit '
'for user "%s" on review request at %s',
request.user.username,
review_request.get_absolute_url())
# If the review request is public and pending review and if the user
# is logged in, mark that they've visited this review request.
if (review_request.public and
review_request.status == review_request.PENDING_REVIEW):
visited.timestamp = timezone.now()
visited.save()
try:
profile = request.user.get_profile()
starred_review_requests = \
profile.starred_review_requests.filter(pk=review_request.pk)
starred = (starred_review_requests.count() > 0)
except Profile.DoesNotExist:
pass
last_activity_time, updated_object = \
review_request.get_last_activity(data.diffsets, data.reviews)
if data.draft:
draft_timestamp = data.draft.last_updated
else:
draft_timestamp = ''
blocks = review_request.get_blocks()
# Find out if we can bail early. Generate an ETag for this.
etag = encode_etag(
'%s:%s:%s:%s:%s:%s:%s:%s:%s:%s' %
(request.user, last_activity_time, draft_timestamp,
data.latest_review_timestamp,
review_request.last_review_activity_timestamp,
is_rich_text_default_for_user(request.user),
[r.pk for r in blocks],
starred, visited and visited.visibility, settings.AJAX_SERIAL))
if etag_if_none_match(request, etag):
return HttpResponseNotModified()
data.query_data_post_etag()
entries = []
reviews_entry_map = {}
changedescs_entry_map = {}
    # Now that we have the list of public reviews and all their metadata,
    # begin processing them and adding entries for display in the page.
for review in data.reviews:
if (review.public and
not review.is_reply() and
not (status_updates_enabled and
hasattr(review, 'status_update'))):
# Mark as collapsed if the review is older than the latest
# change, assuming there's no reply newer than last_visited.
latest_reply = data.latest_timestamps_by_review_id.get(review.pk)
collapsed = (
review.timestamp < data.latest_changedesc_timestamp and
not (latest_reply and
last_visited and
last_visited < latest_reply))
entry = ReviewEntry(request, review_request, review, collapsed,
data)
reviews_entry_map[review.pk] = entry
entries.append(entry)
# Add entries for the change descriptions.
for changedesc in data.changedescs:
# Mark as collapsed if the change is older than a newer change.
collapsed = (changedesc.timestamp < data.latest_changedesc_timestamp)
entry = ChangeEntry(request, review_request, changedesc, collapsed,
data)
changedescs_entry_map[changedesc.id] = entry
entries.append(entry)
if status_updates_enabled:
initial_status_entry = InitialStatusUpdatesEntry(
review_request, collapsed=(len(data.changedescs) > 0),
data=data)
for update in data.status_updates:
if update.change_description_id is not None:
entry = changedescs_entry_map[update.change_description_id]
else:
entry = initial_status_entry
entry.add_update(update)
if update.review_id is not None:
reviews_entry_map[update.review_id] = entry
else:
initial_status_entry = None
# Now that we have entries for all the reviews, go through all the comments
# and add them to those entries.
for comment in data.comments:
review = comment.review_obj
if review.is_reply():
# This is a reply to a comment.
base_reply_to_id = comment.review_obj.base_reply_to_id
assert review.pk not in reviews_entry_map
assert base_reply_to_id in reviews_entry_map
# Make sure that any review boxes containing draft replies are
# always expanded.
if comment.is_reply() and not review.public:
reviews_entry_map[base_reply_to_id].collapsed = False
elif review.public:
# This is a comment on a public review.
assert review.id in reviews_entry_map
entry = reviews_entry_map[review.id]
entry.add_comment(comment._type, comment)
if status_updates_enabled:
initial_status_entry.finalize()
for entry in entries:
entry.finalize()
# Finally, sort all the entries (reviews and change descriptions) by their
# timestamp.
entries.sort(key=lambda item: item.timestamp)
close_description, close_description_rich_text = \
review_request.get_close_description()
siteconfig = SiteConfiguration.objects.get_current()
# Time to render the page!
file_attachments = \
get_latest_file_attachments(data.active_file_attachments)
social_page_image_url = _get_social_page_image_url(
file_attachments)
context_data = make_review_request_context(request, review_request, {
'blocks': blocks,
'draft': data.draft,
'review_request_details': data.review_request_details,
'review_request_visit': visited,
'send_email': siteconfig.get('mail_send_review_mail'),
'initial_status_entry': initial_status_entry,
'entries': entries,
'last_activity_time': last_activity_time,
'review': review_request.get_pending_review(request.user),
'request': request,
'close_description': close_description,
'close_description_rich_text': close_description_rich_text,
'issue_counts': data.issue_counts,
'issues': data.issues,
'file_attachments': file_attachments,
'all_file_attachments': data.all_file_attachments,
'screenshots': data.active_screenshots,
'social_page_image_url': social_page_image_url,
'social_page_title': (
'Review Request #%s: %s'
% (review_request.display_id, review_request.summary)
),
})
response = render_to_response(template_name,
RequestContext(request, context_data))
set_etag(response, etag)
return response
class ReviewsDiffViewerView(DiffViewerView):
"""Renders the diff viewer for a review request.
This wraps the base DiffViewerView to display a diff for the given
review request and the given diff revision or range.
The view expects the following parameters to be provided:
* review_request_id
- The ID of the ReviewRequest containing the diff to render.
The following may also be provided:
* revision
- The DiffSet revision to render.
* interdiff_revision
- The second DiffSet revision in an interdiff revision range.
* local_site
- The LocalSite the ReviewRequest must be on, if any.
See DiffViewerView's documentation for the accepted query parameters.
"""
@method_decorator(check_login_required)
@method_decorator(check_local_site_access)
@augment_method_from(DiffViewerView)
def dispatch(self, *args, **kwargs):
pass
def get(self, request, review_request_id, revision=None,
interdiff_revision=None, local_site=None):
"""Handles GET requests for this view.
This will look up the review request and DiffSets, given the
provided information, and pass them to the parent class for rendering.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
self.review_request = review_request
self.draft = review_request.get_draft(request.user)
self.diffset = _query_for_diff(review_request, request.user,
revision, self.draft)
self.interdiffset = None
if interdiff_revision and interdiff_revision != revision:
# An interdiff revision was specified. Try to find a matching
# diffset.
self.interdiffset = _query_for_diff(review_request, request.user,
interdiff_revision, self.draft)
return super(ReviewsDiffViewerView, self).get(
request, self.diffset, self.interdiffset)
def get_context_data(self, *args, **kwargs):
"""Calculates additional context data for rendering.
This provides some additional data used for rendering the diff
viewer. This data is more specific to the reviewing functionality,
as opposed to the data calculated by DiffViewerView.get_context_data,
which is more focused on the actual diff.
"""
# Try to find an existing pending review of this diff from the
# current user.
pending_review = \
self.review_request.get_pending_review(self.request.user)
has_draft_diff = self.draft and self.draft.diffset
is_draft_diff = has_draft_diff and self.draft.diffset == self.diffset
is_draft_interdiff = (has_draft_diff and self.interdiffset and
self.draft.diffset == self.interdiffset)
# Get the list of diffsets. We only want to calculate this once.
diffsets = self.review_request.get_diffsets()
num_diffs = len(diffsets)
if num_diffs > 0:
latest_diffset = diffsets[-1]
else:
latest_diffset = None
if self.draft and self.draft.diffset:
num_diffs += 1
last_activity_time, updated_object = \
self.review_request.get_last_activity(diffsets)
review_request_details = self.draft or self.review_request
file_attachments = list(review_request_details.get_file_attachments())
screenshots = list(review_request_details.get_screenshots())
latest_file_attachments = get_latest_file_attachments(file_attachments)
social_page_image_url = _get_social_page_image_url(
latest_file_attachments)
# Compute the lists of comments based on filediffs and interfilediffs.
# We do this using the 'through' table so that we can select_related
# the reviews and comments.
comments = {}
q = Comment.review.related.field.rel.through.objects.filter(
review__review_request=self.review_request)
q = q.select_related()
for obj in q:
comment = obj.comment
comment.review_obj = obj.review
key = (comment.filediff_id, comment.interfilediff_id)
comments.setdefault(key, []).append(comment)
close_description, close_description_rich_text = \
self.review_request.get_close_description()
context = super(ReviewsDiffViewerView, self).get_context_data(
*args, **kwargs)
siteconfig = SiteConfiguration.objects.get_current()
context.update({
'close_description': close_description,
'close_description_rich_text': close_description_rich_text,
'diffsets': diffsets,
'latest_diffset': latest_diffset,
'review': pending_review,
'review_request_details': review_request_details,
'draft': self.draft,
'last_activity_time': last_activity_time,
'file_attachments': latest_file_attachments,
'all_file_attachments': file_attachments,
'screenshots': screenshots,
'comments': comments,
'send_email': siteconfig.get('mail_send_review_mail'),
'social_page_image_url': social_page_image_url,
'social_page_title': (
'Diff for Review Request #%s: %s'
% (self.review_request.display_id,
review_request_details.summary)
),
})
context.update(
make_review_request_context(self.request,
self.review_request,
is_diff_view=True))
diffset_pair = context['diffset_pair']
context['diff_context'].update({
'num_diffs': num_diffs,
'comments_hint': {
'has_other_comments': has_comments_in_diffsets_excluding(
pending_review, diffset_pair),
'diffsets_with_comments': [
{
'revision': diffset_info['diffset'].revision,
'is_current': diffset_info['is_current'],
}
for diffset_info in diffsets_with_comments(
pending_review, diffset_pair)
],
'interdiffs_with_comments': [
{
'old_revision': pair['diffset'].revision,
'new_revision': pair['interdiff'].revision,
'is_current': pair['is_current'],
}
for pair in interdiffs_with_comments(
pending_review, diffset_pair)
],
},
})
context['diff_context']['revision'].update({
'latest_revision': (latest_diffset.revision
if latest_diffset else None),
'is_draft_diff': is_draft_diff,
'is_draft_interdiff': is_draft_interdiff,
})
files = []
for f in context['files']:
filediff = f['filediff']
interfilediff = f['interfilediff']
data = {
'newfile': f['newfile'],
'binary': f['binary'],
'deleted': f['deleted'],
'id': f['filediff'].pk,
'depot_filename': f['depot_filename'],
'dest_filename': f['dest_filename'],
'dest_revision': f['dest_revision'],
'revision': f['revision'],
'filediff': {
'id': filediff.id,
'revision': filediff.diffset.revision,
},
'index': f['index'],
'comment_counts': comment_counts(self.request.user, comments,
filediff, interfilediff),
}
if interfilediff:
data['interfilediff'] = {
'id': interfilediff.id,
'revision': interfilediff.diffset.revision,
}
if f['force_interdiff']:
data['force_interdiff'] = True
data['interdiff_revision'] = f['force_interdiff_revision']
files.append(data)
context['diff_context']['files'] = files
return context
@check_login_required
@check_local_site_access
def raw_diff(request, review_request_id, revision=None, local_site=None):
"""
Displays a raw diff of all the filediffs in a diffset for the
given review request.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
draft = review_request.get_draft(request.user)
diffset = _query_for_diff(review_request, request.user, revision, draft)
tool = review_request.repository.get_scmtool()
data = tool.get_parser('').raw_diff(diffset)
resp = HttpResponse(data, content_type='text/x-patch')
if diffset.name == 'diff':
filename = "rb%d.patch" % review_request.display_id
else:
filename = six.text_type(diffset.name).encode('ascii', 'ignore')
# Content-Disposition headers containing commas break on Chrome 16 and
# newer. To avoid this, replace any commas in the filename with an
# underscore. Was bug 3704.
filename = filename.replace(',', '_')
resp['Content-Disposition'] = 'attachment; filename=%s' % filename
set_last_modified(resp, diffset.timestamp)
return resp
@check_login_required
@check_local_site_access
def comment_diff_fragments(
request,
review_request_id,
comment_ids,
template_name='reviews/load_diff_comment_fragments.js',
comment_template_name='reviews/diff_comment_fragment.html',
error_template_name='diffviewer/diff_fragment_error.html',
local_site=None):
"""
Returns the fragment representing the parts of a diff referenced by the
specified list of comment IDs. This is used to allow batch lazy-loading
of these diff fragments based on filediffs, since they may not be cached
and take time to generate.
"""
comments = get_list_or_404(Comment, pk__in=comment_ids.split(","))
latest_timestamp = get_latest_timestamp(comment.timestamp
for comment in comments)
etag = encode_etag(
'%s:%s:%s'
% (comment_ids, latest_timestamp, settings.TEMPLATE_SERIAL))
if etag_if_none_match(request, etag):
response = HttpResponseNotModified()
else:
# While we don't actually need the review request, we still want to do
# this lookup in order to get the permissions checking.
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
lines_of_context = request.GET.get('lines_of_context', '0,0')
container_prefix = request.GET.get('container_prefix')
try:
lines_of_context = [int(i) for i in lines_of_context.split(',')]
# Ensure that we have 2 values for lines_of_context. If only one is
# given, assume it is both the before and after context. If more
# than two are given, only consider the first two. If somehow we
# get no lines of context value, we will default to [0, 0].
if len(lines_of_context) == 1:
lines_of_context.append(lines_of_context[0])
elif len(lines_of_context) > 2:
lines_of_context = lines_of_context[0:2]
elif len(lines_of_context) == 0:
raise ValueError
except ValueError:
lines_of_context = [0, 0]
context = RequestContext(request, {
'comment_entries': [],
'container_prefix': container_prefix,
'queue_name': request.GET.get('queue'),
'show_controls': request.GET.get('show_controls', False),
})
had_error, context['comment_entries'] = (
build_diff_comment_fragments(
comments,
context,
comment_template_name,
error_template_name,
lines_of_context=lines_of_context,
show_controls='draft' not in container_prefix))
page_content = render_to_string(template_name, context)
response = HttpResponse(
page_content,
content_type='application/javascript')
if had_error:
return response
set_etag(response, etag)
response['Expires'] = http_date(time.time() + 60 * 60 * 24 * 365) # 1 year
return response
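# A hedged illustration of the query parameters this view reads (the values
# below are hypothetical, not taken from the module):
#
#     ?lines_of_context=2,2&container_prefix=comment_container&queue=fragments
#
# A single value such as ``lines_of_context=3`` is applied as both the before
# and after context, and anything unparseable falls back to [0, 0].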
class ReviewsDiffFragmentView(DiffFragmentView):
"""Renders a fragment from a file in the diff viewer.
Displays just a fragment of a diff or interdiff owned by the given
review request. The fragment is identified by the chunk index in the
diff.
The view expects the following parameters to be provided:
* review_request_id
- The ID of the ReviewRequest containing the diff to render.
* revision
- The DiffSet revision to render.
* filediff_id
- The ID of the FileDiff within the DiffSet.
The following may also be provided:
* interdiff_revision
- The second DiffSet revision in an interdiff revision range.
* chunk_index
- The index (0-based) of the chunk to render. If left out, the
entire file will be rendered.
* local_site
- The LocalSite the ReviewRequest must be on, if any.
See DiffFragmentView's documentation for the accepted query parameters.
"""
@method_decorator(check_login_required)
@method_decorator(check_local_site_access)
@augment_method_from(DiffFragmentView)
def dispatch(self, *args, **kwargs):
pass
def process_diffset_info(self, review_request_id, revision,
interdiff_revision=None, local_site=None,
*args, **kwargs):
"""Process and return information on the desired diff.
The diff IDs and other data passed to the view can be processed and
converted into DiffSets. A dictionary with the DiffSet and FileDiff
information will be returned.
If the review request cannot be accessed by the user, an HttpResponse
will be returned instead.
"""
self.review_request, response = \
_find_review_request(self.request, review_request_id, local_site)
if not self.review_request:
return response
user = self.request.user
draft = self.review_request.get_draft(user)
if interdiff_revision is not None:
interdiffset = _query_for_diff(self.review_request, user,
interdiff_revision, draft)
else:
interdiffset = None
diffset = _query_for_diff(self.review_request, user, revision, draft)
return super(ReviewsDiffFragmentView, self).process_diffset_info(
diffset_or_id=diffset,
interdiffset_or_id=interdiffset,
**kwargs)
def create_renderer(self, diff_file, *args, **kwargs):
"""Creates the DiffRenderer for this fragment.
This will augment the renderer for binary files by looking up
file attachments, if review UIs are involved, disabling caching.
"""
renderer = super(ReviewsDiffFragmentView, self).create_renderer(
diff_file=diff_file, *args, **kwargs)
if diff_file['binary']:
# Determine the file attachments to display in the diff viewer,
# if any.
filediff = diff_file['filediff']
interfilediff = diff_file['interfilediff']
orig_attachment = None
modified_attachment = None
if diff_file['force_interdiff']:
orig_attachment = self._get_diff_file_attachment(filediff)
modified_attachment = \
self._get_diff_file_attachment(interfilediff)
else:
modified_attachment = self._get_diff_file_attachment(filediff)
if not diff_file['is_new_file']:
orig_attachment = \
self._get_diff_file_attachment(filediff, False)
diff_review_ui = None
diff_review_ui_html = None
orig_review_ui = None
orig_review_ui_html = None
modified_review_ui = None
modified_review_ui_html = None
if orig_attachment:
orig_review_ui = orig_attachment.review_ui
if modified_attachment:
modified_review_ui = modified_attachment.review_ui
# See if we're able to generate a diff review UI for these files.
if (orig_review_ui and modified_review_ui and
orig_review_ui.__class__ is modified_review_ui.__class__ and
modified_review_ui.supports_diffing):
# Both files are able to be diffed by this review UI.
# We'll display a special diff review UI instead of two
# side-by-side review UIs.
diff_review_ui = modified_review_ui
diff_review_ui.set_diff_against(orig_attachment)
diff_review_ui_html = \
self._render_review_ui(diff_review_ui, False)
else:
# We won't be showing a diff of these files. Instead, just
# grab the review UIs and render them.
orig_review_ui_html = \
self._render_review_ui(orig_review_ui)
modified_review_ui_html = \
self._render_review_ui(modified_review_ui)
if (diff_review_ui_html or orig_review_ui_html or
modified_review_ui_html):
# Don't cache the view, because the Review UI may care about
# state that we can't anticipate. At the least, it may have
# comments or other data that change between renders, and we
# don't want that to go stale.
renderer.allow_caching = False
renderer.extra_context.update({
'orig_diff_file_attachment': orig_attachment,
'modified_diff_file_attachment': modified_attachment,
'orig_attachment_review_ui_html': orig_review_ui_html,
'modified_attachment_review_ui_html': modified_review_ui_html,
'diff_attachment_review_ui_html': diff_review_ui_html,
})
renderer.extra_context.update(
self._get_download_links(renderer, diff_file))
return renderer
def get_context_data(self, **kwargs):
return {
'review_request': self.review_request,
}
def _get_download_links(self, renderer, diff_file):
if diff_file['binary']:
orig_attachment = \
renderer.extra_context['orig_diff_file_attachment']
modified_attachment = \
renderer.extra_context['modified_diff_file_attachment']
if orig_attachment:
download_orig_url = orig_attachment.get_absolute_url()
else:
download_orig_url = None
if modified_attachment:
download_modified_url = modified_attachment.get_absolute_url()
else:
download_modified_url = None
else:
filediff = diff_file['filediff']
interfilediff = diff_file['interfilediff']
diffset = filediff.diffset
if interfilediff:
orig_url_name = 'download-modified-file'
modified_revision = interfilediff.diffset.revision
modified_filediff_id = interfilediff.pk
else:
orig_url_name = 'download-orig-file'
modified_revision = diffset.revision
modified_filediff_id = filediff.pk
download_orig_url = local_site_reverse(
orig_url_name,
request=self.request,
kwargs={
'review_request_id': self.review_request.display_id,
'revision': diffset.revision,
'filediff_id': filediff.pk,
})
download_modified_url = local_site_reverse(
'download-modified-file',
request=self.request,
kwargs={
'review_request_id': self.review_request.display_id,
'revision': modified_revision,
'filediff_id': modified_filediff_id,
})
return {
'download_orig_url': download_orig_url,
'download_modified_url': download_modified_url,
}
def _render_review_ui(self, review_ui, inline_only=True):
"""Renders the review UI for a file attachment."""
if review_ui and (not inline_only or review_ui.allow_inline):
return mark_safe(review_ui.render_to_string(self.request))
return None
def _get_diff_file_attachment(self, filediff, use_modified=True):
"""Fetch the FileAttachment associated with a FileDiff.
        This will query for the FileAttachment based on the provided filediff
        and return the matching attachment, if one exists.
If 'use_modified' is True, the FileAttachment returned will be from the
modified version of the new file. Otherwise, it's the original file
that's being modified.
If no matching FileAttachment is found or if there is more than one
FileAttachment associated with one FileDiff, None is returned. An error
is logged in the latter case.
"""
if not filediff:
return None
try:
return FileAttachment.objects.get_for_filediff(filediff,
use_modified)
except ObjectDoesNotExist:
return None
except MultipleObjectsReturned:
# Only one FileAttachment should be associated with a FileDiff
            logging.error('More than one FileAttachment associated with '
'FileDiff %s',
filediff.pk,
exc_info=1)
return None
class ReviewsDownloadPatchErrorBundleView(DownloadPatchErrorBundleView,
ReviewsDiffFragmentView):
"""A view to download the patch error bundle.
This view allows users to download a bundle containing data to help debug
issues when a patch fails to apply. The bundle will contain the diff, the
original file (as returned by the SCMTool), and the rejects file, if
applicable.
"""
@check_login_required
@check_local_site_access
def preview_review_request_email(
request,
review_request_id,
format,
text_template_name='notifications/review_request_email.txt',
html_template_name='notifications/review_request_email.html',
changedesc_id=None,
local_site=None):
"""
Previews the e-mail message that would be sent for an initial
review request or an update.
This is mainly used for debugging.
"""
if not settings.DEBUG:
raise Http404
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
extra_context = {}
if changedesc_id:
changedesc = get_object_or_404(review_request.changedescs,
pk=changedesc_id)
extra_context['change_text'] = changedesc.text
extra_context['changes'] = changedesc.fields_changed
siteconfig = SiteConfiguration.objects.get_current()
if format == 'text':
template_name = text_template_name
mimetype = 'text/plain'
elif format == 'html':
template_name = html_template_name
mimetype = 'text/html'
else:
raise Http404
return HttpResponse(render_to_string(
template_name,
RequestContext(request, dict({
'review_request': review_request,
'user': request.user,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
}, **extra_context)),
), content_type=mimetype)
@check_login_required
@check_local_site_access
def preview_review_email(request, review_request_id, review_id, format,
text_template_name='notifications/review_email.txt',
html_template_name='notifications/review_email.html',
extra_context={},
local_site=None):
"""
Previews the e-mail message that would be sent for a review of a
review request.
This is mainly used for debugging.
"""
if not settings.DEBUG:
raise Http404
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
review = get_object_or_404(Review, pk=review_id,
review_request=review_request)
siteconfig = SiteConfiguration.objects.get_current()
review.ordered_comments = \
review.comments.order_by('filediff', 'first_line')
if format == 'text':
template_name = text_template_name
mimetype = 'text/plain'
elif format == 'html':
template_name = html_template_name
mimetype = 'text/html'
else:
raise Http404
context = {
'review_request': review_request,
'review': review,
'user': request.user,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
}
context.update(extra_context)
has_error, context['comment_entries'] = \
build_diff_comment_fragments(
review.ordered_comments, context,
"notifications/email_diff_comment_fragment.html")
return HttpResponse(
render_to_string(template_name, RequestContext(request, context)),
content_type=mimetype)
@check_login_required
@check_local_site_access
def preview_reply_email(request, review_request_id, review_id, reply_id,
format,
text_template_name='notifications/reply_email.txt',
html_template_name='notifications/reply_email.html',
local_site=None):
"""
Previews the e-mail message that would be sent for a reply to a
review of a review request.
This is mainly used for debugging.
"""
if not settings.DEBUG:
raise Http404
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
review = get_object_or_404(Review, pk=review_id,
review_request=review_request)
reply = get_object_or_404(Review, pk=reply_id, base_reply_to=review)
siteconfig = SiteConfiguration.objects.get_current()
reply.ordered_comments = \
reply.comments.order_by('filediff', 'first_line')
if format == 'text':
template_name = text_template_name
mimetype = 'text/plain'
elif format == 'html':
template_name = html_template_name
mimetype = 'text/html'
else:
raise Http404
context = {
'review_request': review_request,
'review': review,
'reply': reply,
'user': request.user,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
}
has_error, context['comment_entries'] = \
build_diff_comment_fragments(
reply.ordered_comments, context,
"notifications/email_diff_comment_fragment.html")
return HttpResponse(
render_to_string(template_name, RequestContext(request, context)),
content_type=mimetype)
@check_login_required
@check_local_site_access
def review_file_attachment(request, review_request_id, file_attachment_id,
file_attachment_diff_id=None, local_site=None):
"""Displays a file attachment with a review UI."""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
draft = review_request.get_draft(request.user)
# Make sure the attachment returned is part of either the review request
# or an accessible draft.
review_request_q = (Q(review_request=review_request) |
Q(inactive_review_request=review_request))
if draft:
review_request_q |= Q(drafts=draft) | Q(inactive_drafts=draft)
file_attachment = get_object_or_404(
FileAttachment,
Q(pk=file_attachment_id) & review_request_q)
review_ui = file_attachment.review_ui
if not review_ui:
review_ui = FileAttachmentReviewUI(review_request, file_attachment)
if file_attachment_diff_id:
file_attachment_revision = get_object_or_404(
FileAttachment,
Q(pk=file_attachment_diff_id) &
Q(attachment_history=file_attachment.attachment_history) &
review_request_q)
review_ui.set_diff_against(file_attachment_revision)
try:
is_enabled_for = review_ui.is_enabled_for(
user=request.user,
review_request=review_request,
file_attachment=file_attachment)
except Exception as e:
logging.error('Error when calling is_enabled_for for '
'FileAttachmentReviewUI %r: %s',
review_ui, e, exc_info=1)
is_enabled_for = False
if review_ui and is_enabled_for:
return review_ui.render_to_response(request)
else:
raise Http404
@check_login_required
@check_local_site_access
def view_screenshot(request, review_request_id, screenshot_id,
local_site=None):
"""
Displays a screenshot, along with any comments that were made on it.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
draft = review_request.get_draft(request.user)
# Make sure the screenshot returned is part of either the review request
# or an accessible draft.
review_request_q = (Q(review_request=review_request) |
Q(inactive_review_request=review_request))
if draft:
review_request_q |= Q(drafts=draft) | Q(inactive_drafts=draft)
screenshot = get_object_or_404(Screenshot,
Q(pk=screenshot_id) & review_request_q)
review_ui = LegacyScreenshotReviewUI(review_request, screenshot)
return review_ui.render_to_response(request)
@check_login_required
@check_local_site_access
def user_infobox(request, username,
template_name='accounts/user_infobox.html',
local_site=None):
"""Displays a user info popup.
This is meant to be embedded in other pages, rather than being
a standalone page.
"""
from reviewboard.extensions.hooks import UserInfoboxHook
user = get_object_or_404(User, username=username)
try:
profile = user.get_profile()
show_profile = not profile.is_private
timezone = profile.timezone
except Profile.DoesNotExist:
show_profile = True
timezone = 'UTC'
etag_data = [
user.first_name,
user.last_name,
user.email,
six.text_type(user.last_login),
six.text_type(settings.TEMPLATE_SERIAL),
six.text_type(show_profile),
timezone,
]
if avatar_services.avatars_enabled:
avatar_service = avatar_services.for_user(user)
if avatar_service:
etag_data.extend(avatar_service.get_etag_data(user))
for hook in UserInfoboxHook.hooks:
try:
etag_data.append(hook.get_etag_data(user, request, local_site))
except Exception as e:
logging.exception('Error when running UserInfoboxHook.'
'get_etag_data method in extension "%s": %s',
hook.extension.id, e)
etag = encode_etag(':'.join(etag_data))
if etag_if_none_match(request, etag):
return HttpResponseNotModified()
extra_content = []
for hook in UserInfoboxHook.hooks:
try:
extra_content.append(hook.render(user, request, local_site))
except Exception as e:
logging.exception('Error when running UserInfoboxHook.'
'render method in extension "%s": %s',
hook.extension.id, e)
review_requests_url = local_site_reverse('user', local_site=local_site,
args=[username])
reviews_url = local_site_reverse('user-grid', local_site=local_site,
args=[username, 'reviews'])
response = render_to_response(template_name, RequestContext(request, {
'extra_content': mark_safe(''.join(extra_content)),
'full_name': user.get_full_name(),
'infobox_user': user,
'review_requests_url': review_requests_url,
'reviews_url': reviews_url,
'show_profile': show_profile,
'timezone': timezone,
}))
set_etag(response, etag)
return response
@check_login_required
@check_local_site_access
def bug_url(request, review_request_id, bug_id, local_site=None):
"""Redirects user to bug tracker issue page."""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
# Need to create a custom HttpResponse because a non-HTTP url scheme will
# cause HttpResponseRedirect to fail with a "Disallowed Redirect".
response = HttpResponse(status=302)
response['Location'] = review_request.repository.bug_tracker % bug_id
return response
@check_login_required
@check_local_site_access
def bug_infobox(request, review_request_id, bug_id,
template_name='reviews/bug_infobox.html',
local_site=None):
"""Displays a bug info popup.
This is meant to be embedded in other pages, rather than being
a standalone page.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
repository = review_request.repository
bug_tracker = repository.bug_tracker_service
if not bug_tracker:
return HttpResponseNotFound(_('Unable to find bug tracker service'))
if not isinstance(bug_tracker, BugTracker):
return HttpResponseNotFound(
_('Bug tracker %s does not support metadata') % bug_tracker.name)
bug_info = bug_tracker.get_bug_info(repository, bug_id)
bug_description = bug_info['description']
bug_summary = bug_info['summary']
bug_status = bug_info['status']
if not bug_summary and not bug_description:
return HttpResponseNotFound(
_('No bug metadata found for bug %(bug_id)s on bug tracker '
'%(bug_tracker)s') % {
'bug_id': bug_id,
'bug_tracker': bug_tracker.name,
})
# Don't do anything for single newlines, but treat two newlines as a
# paragraph break.
escaped_description = escape(bug_description).replace('\n\n', '<br/><br/>')
return render_to_response(template_name, RequestContext(request, {
'bug_id': bug_id,
'bug_description': mark_safe(escaped_description),
'bug_status': bug_status,
'bug_summary': bug_summary
}))
def _download_diff_file(modified, request, review_request_id, revision,
filediff_id, local_site=None):
"""Downloads an original or modified file from a diff.
This will fetch the file from a FileDiff, optionally patching it,
and return the result as an HttpResponse.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
draft = review_request.get_draft(request.user)
diffset = _query_for_diff(review_request, request.user, revision, draft)
filediff = get_object_or_404(diffset.files, pk=filediff_id)
encoding_list = diffset.repository.get_encoding_list()
try:
data = get_original_file(filediff, request, encoding_list)
except FileNotFoundError:
logging.exception(
'Could not retrieve file "%s" (revision %s) for filediff ID %s',
filediff.dest_detail, revision, filediff_id)
raise Http404
if modified:
data = get_patched_file(data, filediff, request)
data = convert_to_unicode(data, encoding_list)[1]
return HttpResponse(data, content_type='text/plain; charset=utf-8')
@check_login_required
@check_local_site_access
def download_orig_file(*args, **kwargs):
"""Downloads an original file from a diff."""
return _download_diff_file(False, *args, **kwargs)
@check_login_required
@check_local_site_access
def download_modified_file(*args, **kwargs):
"""Downloads a modified file from a diff."""
return _download_diff_file(True, *args, **kwargs)
|
|
import time
import string
import hashlib
import binascii
import logging.handlers
import codecs
import os.path
import datetime
import random
import mimetypes
import re
from itertools import groupby
import tg
import pylons
import webob.multidict
from formencode import Invalid
from tg.decorators import before_validate
from pylons import response
from pylons import tmpl_context as c
from paste.httpheaders import CACHE_CONTROL, EXPIRES
from webhelpers.html import literal
from webob import exc
from pygments.formatters import HtmlFormatter
from ew import jinja2_ew as ew
from ming.utils import LazyProperty
def permanent_redirect(url):
try:
tg.redirect(url)
except exc.HTTPFound, err:
raise exc.HTTPMovedPermanently(location=err.location)
def cache_forever():
headers = [
(k,v) for k,v in response.headers.items()
if k.lower() not in ('pragma', 'cache-control') ]
delta = CACHE_CONTROL.apply(
headers,
public=True,
max_age=60*60*24*365)
EXPIRES.update(headers, delta=delta)
response.headers.pop('cache-control', None)
response.headers.pop('pragma', None)
response.headers.update(headers)
class memoize_on_request(object):
def __init__(self, *key, **kwargs):
self.key = key
self.include_func_in_key = kwargs.pop(
'include_func_in_key', False)
assert not kwargs, 'Extra args'
def __call__(self, func):
def wrapper(*args, **kwargs):
cache = c.memoize_cache
if self.include_func_in_key:
key = (func, self.key, args, tuple(kwargs.iteritems()))
else:
key = (self.key, args, tuple(kwargs.iteritems()))
if key in cache:
result = cache[key]
else:
result = cache[key] = func(*args, **kwargs)
return result
wrapper.__name__ = 'wrap(%s)' % func.__name__
return wrapper
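# A minimal usage sketch (not part of the original module; ``lookup_project``
# and ``expensive_query`` are hypothetical names). The decorator caches results
# on ``c.memoize_cache``, keyed by the given key plus the call arguments:
#
#     @memoize_on_request('lookup_project')
#     def lookup_project(project_id):
#         return expensive_query(project_id)
#
# Assumes ``c.memoize_cache`` has been initialized to a dict for the request.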
def guess_mime_type(filename):
'''Guess MIME type based on filename.
    Applies heuristics, tweaks, and defaults in a centralized manner.
'''
# Consider changing to strict=False
content_type = mimetypes.guess_type(filename, strict=True)
if content_type[0]:
content_type = content_type[0]
else:
content_type = 'application/octet-stream'
return content_type
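# Illustrative results (a sketch):
#
#     guess_mime_type('notes.txt')    # -> 'text/plain'
#     guess_mime_type('blob.unknown') # -> 'application/octet-stream' (fallback)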
class ConfigProxy(object):
'''Wrapper for loading config values at module-scope so we don't
have problems when a module is imported before tg.config is initialized
'''
def __init__(self, **kw):
self._kw = kw
def __getattr__(self, k):
return tg.config[self._kw[k]]
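# A usage sketch (the config key is hypothetical): attribute access defers the
# tg.config lookup until the attribute is actually read, so the proxy can be
# created safely at import time.
#
#     config = ConfigProxy(mail_domain='forgemail.domain')
#     ...
#     domain = config.mail_domain   # reads tg.config['forgemail.domain']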
class lazy_logger(object):
    '''Lazy instantiation of a logger, to ensure that it does not get
created before logging is configured (which would make it disabled)'''
def __init__(self, name):
self._name = name
@LazyProperty
def _logger(self):
return logging.getLogger(self._name)
def __getattr__(self, name):
if name.startswith('_'): raise AttributeError, name
return getattr(self._logger, name)
class TimedRotatingHandler(logging.handlers.BaseRotatingHandler):
def __init__(self, strftime_pattern):
self.pattern = strftime_pattern
self.last_filename = self.current_filename()
logging.handlers.BaseRotatingHandler.__init__(self, self.last_filename, 'a')
def current_filename(self):
return os.path.abspath(datetime.datetime.utcnow().strftime(self.pattern))
def shouldRollover(self, record):
        'Inherited from BaseRotatingHandler'
return self.current_filename() != self.last_filename
def doRollover(self):
self.stream.close()
self.baseFilename = self.current_filename()
if self.encoding:
self.stream = codecs.open(self.baseFilename, 'w', self.encoding)
else:
self.stream = open(self.baseFilename, 'w')
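# A configuration sketch (the path is hypothetical): the handler rolls over
# whenever the strftime-expanded filename changes, e.g. once per day here.
#
#     handler = TimedRotatingHandler('/var/log/allura/stats.%Y-%m-%d.log')
#     logging.getLogger('stats').addHandler(handler)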
class StatsHandler(TimedRotatingHandler):
fields=('action', 'action_type', 'tool_type', 'tool_mount', 'project', 'neighborhood',
'username', 'url', 'ip_address')
def __init__(self,
strftime_pattern,
module='allura',
page=1,
**kwargs):
self.page = page
self.module = module
TimedRotatingHandler.__init__(self, strftime_pattern)
def emit(self, record):
if not hasattr(record, 'action'):
return
kwpairs = dict(
module=self.module,
page=self.page)
for name in self.fields:
kwpairs[name] = getattr(record, name, None)
kwpairs.update(getattr(record, 'kwpairs', {}))
record.kwpairs = ','.join(
'%s=%s' % (k,v) for k,v in sorted(kwpairs.iteritems())
if v is not None)
record.exc_info = None # Never put tracebacks in the rtstats log
TimedRotatingHandler.emit(self, record)
def chunked_find(cls, query=None, pagesize=1024, sort_key=None, sort_dir=1):
if query is None: query = {}
page = 0
while True:
q = cls.query.find(query).skip(pagesize * page).limit(pagesize)
if sort_key:
q.sort(sort_key, sort_dir)
results = (q.all())
if not results: break
yield results
page += 1
def lsub_utf8(s, n):
'''Useful for returning n bytes of a UTF-8 string, rather than characters'''
while len(s) > n:
k = n
while (ord(s[k]) & 0xc0) == 0x80:
k -= 1
return s[:k]
return s
def chunked_list(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
def chunked_iter(iterable, max_size):
    '''Yield 'chunks' (each an iterable of at most max_size items) from the iterable'''
eiter = enumerate(iterable)
keyfunc = lambda (i,x): i//max_size
for _, chunk in groupby(eiter, keyfunc):
yield (x for i,x in chunk)
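# Illustrative behaviour of the two chunking helpers (a sketch):
#
#     list(chunked_list(range(5), 2))
#     # -> [[0, 1], [2, 3], [4]]
#     [list(chunk) for chunk in chunked_iter('abcde', 2)]
#     # -> [['a', 'b'], ['c', 'd'], ['e']]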
class AntiSpam(object):
'''Helper class for bot-protecting forms'''
honey_field_template=string.Template('''<p class="$honey_class">
<label for="$fld_id">You seem to have CSS turned off.
Please don't fill out this field.</label><br>
<input id="$fld_id" name="$fld_name" type="text"><br></p>''')
def __init__(self, request=None, num_honey=2):
self.num_honey = num_honey
if request is None or request.method == 'GET':
self.request = pylons.request
self.timestamp = int(time.time())
self.spinner = self.make_spinner()
self.timestamp_text = str(self.timestamp)
self.spinner_text = self._wrap(self.spinner)
else:
self.request = request
self.timestamp_text = request.params['timestamp']
self.spinner_text = request.params['spinner']
self.timestamp = int(self.timestamp_text)
self.spinner = self._unwrap(self.spinner_text)
self.spinner_ord = map(ord, self.spinner)
self.random_padding = [ random.randint(0,255) for x in self.spinner ]
self.honey_class = self.enc(self.spinner_text, css_safe=True)
# The counter is to ensure that multiple forms in the same page
# don't end up with the same id. Instead of doing:
#
# honey0, honey1
# which just relies on 0..num_honey we include a counter
# which is incremented every time extra_fields is called:
#
        # honey00, honey01, honey10, honey11
self.counter = 0
@staticmethod
def _wrap(s):
'''Encode a string to make it HTML id-safe (starts with alpha, includes
only digits, hyphens, underscores, colons, and periods). Luckily, base64
encoding doesn't use hyphens, underscores, colons, nor periods, so we'll
use these characters to replace its plus, slash, equals, and newline.
'''
tx_tbl = string.maketrans('+/', '-_')
s = binascii.b2a_base64(s)
s = s.rstrip('=\n')
s = s.translate(tx_tbl)
s = 'X' + s
return s
@staticmethod
def _unwrap(s):
tx_tbl = string.maketrans('-_', '+/')
s = s[1:]
s = str(s).translate(tx_tbl)
i = len(s) % 4
if i > 0:
s += '=' * (4 - i)
s = binascii.a2b_base64(s + '\n')
return s
def enc(self, plain, css_safe=False):
'''Stupid fieldname encryption. Not production-grade, but
hopefully "good enough" to stop spammers. Basically just an
XOR of the spinner with the unobfuscated field name
'''
# Plain starts with its length, includes the ordinals for its
# characters, and is padded with random data
plain = ([ len(plain) ]
+ map(ord, plain)
+ self.random_padding[:len(self.spinner_ord) - len(plain) - 1])
enc = ''.join(chr(p^s) for p, s in zip(plain, self.spinner_ord))
enc = self._wrap(enc)
if css_safe:
enc = ''.join(ch for ch in enc if ch.isalpha())
return enc
def dec(self, enc):
enc = self._unwrap(enc)
enc = list(map(ord, enc))
plain = [e^s for e,s in zip(enc, self.spinner_ord)]
plain = plain[1:1+plain[0]]
plain = ''.join(map(chr, plain))
return plain
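    # A round-trip sketch (assumes an active request so the spinner can be
    # derived from the client IP and timestamp; values differ per request):
    #
    #     a = AntiSpam()
    #     assert a.dec(a.enc('honey0')) == 'honey0'
    #
    # enc() XORs the name with the spinner and base64-wraps it via _wrap();
    # dec() reverses both steps.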
def extra_fields(self):
yield ew.HiddenField(name='timestamp', value=self.timestamp_text).display()
yield ew.HiddenField(name='spinner', value=self.spinner_text).display()
for fldno in range(self.num_honey):
fld_name = self.enc('honey%d' % (fldno))
fld_id = self.enc('honey%d%d' % (self.counter, fldno))
yield literal(self.honey_field_template.substitute(
honey_class=self.honey_class,
fld_id=fld_id,
fld_name=fld_name))
self.counter += 1
def make_spinner(self, timestamp=None):
if timestamp is None: timestamp = self.timestamp
try:
client_ip = self.request.headers.get('X_FORWARDED_FOR', self.request.remote_addr)
client_ip = client_ip.split(',')[0].strip()
except (TypeError, AttributeError), err:
client_ip = '127.0.0.1'
plain = '%d:%s:%s' % (
timestamp, client_ip, pylons.config.get('spinner_secret', 'abcdef'))
return hashlib.sha1(plain).digest()
@classmethod
def validate_request(cls, request=None, now=None, params=None):
if request is None: request = pylons.request
if params is None: params = request.params
new_params = dict(params)
if not request.method == 'GET':
new_params.pop('timestamp', None)
new_params.pop('spinner', None)
obj = cls(request)
if now is None: now = time.time()
if obj.timestamp > now + 5:
raise ValueError, 'Post from the future'
if now - obj.timestamp > 60*60:
raise ValueError, 'Post from the 1hr+ past'
if obj.spinner != obj.make_spinner(obj.timestamp):
raise ValueError, 'Bad spinner value'
for k in new_params.keys():
new_params[obj.dec(k)] = new_params.pop(k)
for fldno in range(obj.num_honey):
value = new_params.pop('honey%s' % fldno)
if value:
raise ValueError, 'Value in honeypot field: %s' % value
return new_params
@classmethod
def validate(cls, error_msg):
'''Controller decorator to raise Invalid errors if bot protection is engaged'''
def antispam_hook(remainder, params):
'''Converts various errors in validate_request to a single Invalid message'''
try:
new_params = cls.validate_request(params=params)
params.update(new_params)
except (ValueError, TypeError, binascii.Error):
raise Invalid(error_msg, params, None)
return before_validate(antispam_hook)
class TruthyCallable(object):
'''
Wraps a callable to make it truthy in a boolean context.
Assumes the callable returns a truthy value and can be called with no args.
'''
def __init__(self, callable):
self.callable = callable
def __call__(self, *args, **kw):
return self.callable(*args, **kw)
def __nonzero__(self):
return self.callable()
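# A usage sketch (``has_access`` is a hypothetical predicate): the wrapper is
# truthy or falsy according to what the callable returns, while remaining
# directly callable.
#
#     can_edit = TruthyCallable(lambda: has_access(c.user, 'edit'))
#     if can_edit:          # truth-testing invokes the callable
#         ...
#     explicit = can_edit() # still callable as usual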
class CaseInsensitiveDict(dict):
def __init__(self, *args, **kwargs):
super(CaseInsensitiveDict, self).__init__(*args, **kwargs)
self._reindex()
def _reindex(self):
items = self.items()
self.clear()
self._index = {}
for k,v in items:
self[k] = v
assert len(self) == len(items), 'Duplicate (case-insensitive) key'
def __getitem__(self, name):
return super(CaseInsensitiveDict, self).__getitem__(name.lower())
def __setitem__(self, name, value):
lname = name.lower()
super(CaseInsensitiveDict, self).__setitem__(lname, value)
self._index[lname] = name
def __delitem__(self, name):
super(CaseInsensitiveDict, self).__delitem__(name.lower())
def pop(self, k, *args):
return super(CaseInsensitiveDict, self).pop(k.lower(), *args)
def popitem(self):
k,v = super(CaseInsensitiveDict, self).popitem()
return self._index[k], v
def update(self, *args, **kwargs):
super(CaseInsensitiveDict, self).update(*args, **kwargs)
self._reindex()
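# Illustrative behaviour (a sketch): keys are stored lower-cased, while
# _index remembers the original spelling for popitem().
#
#     d = CaseInsensitiveDict()
#     d['Content-Type'] = 'text/plain'
#     assert d['content-type'] == 'text/plain'
#     assert d.popitem() == ('Content-Type', 'text/plain')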
def postmortem_hook(etype, value, tb): # pragma no cover
import sys, pdb, traceback
try:
from IPython.ipapi import make_session; make_session()
from IPython.Debugger import Pdb
sys.stderr.write('Entering post-mortem IPDB shell\n')
p = Pdb(color_scheme='Linux')
p.reset()
p.setup(None, tb)
p.print_stack_trace()
sys.stderr.write('%s: %s\n' % ( etype, value))
p.cmdloop()
p.forget()
# p.interaction(None, tb)
except ImportError:
sys.stderr.write('Entering post-mortem PDB shell\n')
traceback.print_exception(etype, value, tb)
pdb.post_mortem(tb)
class LineAnchorCodeHtmlFormatter(HtmlFormatter):
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append('line-height: 125%')
style = '; '.join(style)
num = self.linenostart
yield 0, ('<pre' + (style and ' style="%s"' % style) + '>')
for tup in inner:
yield (tup[0], '<div id="l%s" class="code_block">%s</div>' % (num, tup[1]))
num += 1
yield 0, '</pre>'
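# A usage sketch (``source`` is a hypothetical string of code): each line of
# the highlighted output is wrapped in <div id="l<N>" class="code_block">, so
# individual lines can be targeted with #l<N> anchors.
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     html = highlight(source, PythonLexer(), LineAnchorCodeHtmlFormatter())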
def generate_code_stats(blob):
stats = {'line_count': 0,
'code_size': 0,
'data_line_count': 0}
code = blob.text
lines = code.split('\n')
stats['code_size'] = blob.size
stats['line_count'] = len(lines)
spaces = re.compile(r'^\s*$')
stats['data_line_count'] = sum([1 for l in lines if not spaces.match(l)])
return stats
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import logging
import argparse
import os
import pdb
import random
import shutil
import subprocess
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "bitcoin_func_test_"
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class BitcoinTestMetaClass(type):
"""Metaclass for BitcoinTestFramework.
Ensures that any attempt to register a subclass of `BitcoinTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'BitcoinTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("BitcoinTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("BitcoinTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
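    # A minimal subclass sketch (hypothetical; it only illustrates the
    # contract described above and checked by BitcoinTestMetaClass):
    #
    #     class ExampleTest(BitcoinTestFramework):
    #         def set_test_params(self):
    #             self.num_nodes = 2
    #             self.setup_clean_chain = True
    #
    #         def run_test(self):
    #             self.nodes[0].generate(1)
    #             sync_blocks(self.nodes)
    #             assert_equal(self.nodes[0].getblockcount(),
    #                          self.nodes[1].getblockcount())
    #
    #     if __name__ == '__main__':
    #         ExampleTest().main()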
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.chain = 'regtest'
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = True
self.bind_to_localhost_only = True
self.set_test_params()
self.parse_args()
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
try:
self.setup()
self.run_test()
except JSONRPCException:
self.log.exception("JSONRPC error")
self.success = TestStatus.FAILED
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
self.success = TestStatus.SKIPPED
except AssertionError:
self.log.exception("Assertion failed")
self.success = TestStatus.FAILED
except KeyError:
self.log.exception("Key error")
self.success = TestStatus.FAILED
except subprocess.CalledProcessError as e:
self.log.exception("Called Process failed with '{}'".format(e.output))
self.success = TestStatus.FAILED
except Exception:
self.log.exception("Unexpected exception caught during testing")
self.success = TestStatus.FAILED
except KeyboardInterrupt:
self.log.warning("Exiting after keyboard interrupt")
self.success = TestStatus.FAILED
finally:
exit_code = self.shutdown()
sys.exit(exit_code)
def parse_args(self):
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
parser.add_argument("--perf", dest="perf", default=False, action="store_true",
help="profile running nodes with perf for the duration of the test")
parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true",
help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required")
parser.add_argument("--randomseed", type=int,
help="set a random seed for deterministically reproducing a previous test run")
self.add_options(parser)
self.options = parser.parse_args()
def setup(self):
"""Call this method to start up the test framework object with options set."""
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.config = config
self.options.bitcoind = os.getenv("BITCOIND", default=config["environment"]["BUILDDIR"] + '/src/omnicored' + config["environment"]["EXEEXT"])
self.options.bitcoincli = os.getenv("BITCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/omnicore-cli' + config["environment"]["EXEEXT"])
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
# Seed the PRNG. Note that test runs are reproducible if and only if
# a single thread accesses the PRNG. For more information, see
# https://docs.python.org/3/library/random.html#notes-on-reproducibility.
# The network thread shouldn't access random. If we need to change the
# network thread to access randomness, it should instantiate its own
# random.Random object.
seed = self.options.randomseed
if seed is None:
seed = random.randrange(sys.maxsize)
else:
self.log.debug("User supplied random seed {}".format(seed))
random.seed(seed)
self.log.debug("PRNG seed is: {}".format(seed))
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
if self.options.usecli:
if not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_if_no_cli()
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.success = TestStatus.PASSED
def shutdown(self):
"""Call this method to shut down the test framework object."""
if self.success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: bitcoinds were not stopped and may still be running")
should_clean_up = (
not self.options.nocleanup and
not self.options.noshutdown and
self.success != TestStatus.FAILED and
not self.options.perf
)
if should_clean_up:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
elif self.options.perf:
self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
cleanup_tree_on_exit = False
else:
self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
cleanup_tree_on_exit = False
if self.success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif self.success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
# Logging.shutdown will not remove stream- and filehandlers, so we must
# do it explicitly. Handlers are removed so the next test run can apply
# different log handler settings.
# See: https://docs.python.org/3/library/logging.html#logging.shutdown
for h in list(self.log.handlers):
h.flush()
h.close()
self.log.removeHandler(h)
rpc_logger = logging.getLogger("BitcoinRPC")
for h in list(rpc_logger.handlers):
h.flush()
rpc_logger.removeHandler(h)
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
self.nodes.clear()
return exit_code
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
#
# Topology looks like this:
# node0 <-- node1 <-- node2 <-- node3
#
# If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
# ensure block propagation, all nodes will establish outgoing connections toward node0.
# See fPreferredDownload in net_processing.
#
# If further outbound connections are needed, they can be added at the beginning of the test with e.g.
# connect_nodes(self.nodes[1], 2)
for i in range(self.num_nodes - 1):
connect_nodes(self.nodes[i + 1], i)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
block_hash = self.nodes[0].generate(1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
chain_info = n.getblockchaininfo()
assert_equal(chain_info["blocks"], 200)
assert_equal(chain_info["initialblockdownload"], False)
def import_deterministic_coinbase_privkeys(self):
for n in self.nodes:
try:
n.getwalletinfo()
except JSONRPCException as e:
assert str(e).startswith('Method not found')
continue
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
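# A minimal sketch of a concrete test (hypothetical, for illustration only;
# it assumes the framework class here is BitcoinTestFramework and exposes the
# usual main() entry point):
#
#     class ExampleTest(BitcoinTestFramework):
#         def set_test_params(self):
#             self.num_nodes = 2
#             self.setup_clean_chain = True
#
#         def run_test(self):
#             self.nodes[0].generate(10)
#             self.sync_all()
#
#     if __name__ == '__main__':
#         ExampleTest().main()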
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if versions is None:
versions = [None] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
if binary_cli is None:
binary_cli = [self.options.bitcoincli] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(versions), num_nodes)
assert_equal(len(binary), num_nodes)
assert_equal(len(binary_cli), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
chain=self.chain,
rpchost=rpchost,
timewait=self.rpc_timeout,
bitcoind=binary[i],
bitcoin_cli=binary_cli[i],
version=versions[i],
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
start_perf=self.options.perf,
use_valgrind=self.options.valgrind,
))
def start_node(self, i, *args, **kwargs):
"""Start a bitcoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple bitcoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr='', wait=0):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node(expected_stderr, wait=wait)
self.nodes[i].wait_until_stopped()
def stop_nodes(self, wait=0):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes(self.nodes[1], 2)
self.sync_all()
def sync_blocks(self, nodes=None, **kwargs):
sync_blocks(nodes or self.nodes, **kwargs)
def sync_mempools(self, nodes=None, **kwargs):
sync_mempools(nodes or self.nodes, **kwargs)
def sync_all(self, nodes=None, **kwargs):
self.sync_blocks(nodes, **kwargs)
self.sync_mempools(nodes, **kwargs)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stdout. By default this logs messages at INFO level and higher, but it can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 199-block-long chain.
Afterward, create num_nodes copies from the cache."""
CACHE_NODE_ID = 0 # Use node 0 to create the cache for all other nodes
cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
assert self.num_nodes <= MAX_NODES
if not os.path.isdir(cache_node_dir):
self.log.debug("Creating cache directory {}".format(cache_node_dir))
initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain)
self.nodes.append(
TestNode(
CACHE_NODE_ID,
cache_node_dir,
chain=self.chain,
extra_conf=["bind=127.0.0.1"],
extra_args=['-disablewallet'],
rpchost=None,
timewait=self.rpc_timeout,
bitcoind=self.options.bitcoind,
bitcoin_cli=self.options.bitcoincli,
coverage_dir=None,
cwd=self.options.tmpdir,
))
self.start_node(CACHE_NODE_ID)
# Wait for RPC connections to be ready
self.nodes[CACHE_NODE_ID].wait_for_rpc_connection()
# Create a 199-block-long chain; each of the first 4 nodes
# gets 25 mature blocks and 25 immature.
# The 4th node gets only 24 immature blocks so that the very last
# block in the cache does not age too much (have an old tip age).
# This is needed so that we are out of IBD when the test starts,
# see the tip age check in IsInitialBlockDownload().
for i in range(8):
self.nodes[CACHE_NODE_ID].generatetoaddress(
nblocks=25 if i != 7 else 24,
address=TestNode.PRIV_KEYS[i % 4].address,
)
assert_equal(self.nodes[CACHE_NODE_ID].getblockchaininfo()["blocks"], 199)
# Shut it down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
def cache_path(*paths):
return os.path.join(cache_node_dir, self.chain, *paths)
os.rmdir(cache_path('wallets')) # Remove empty wallets dir
# Remove Omni specific dirs
shutil.rmtree(cache_path('OMNI_feecache'))
shutil.rmtree(cache_path('OMNI_feehistory'))
shutil.rmtree(cache_path('Omni_TXDB'))
shutil.rmtree(cache_path('OMNI_nftdb'))
shutil.rmtree(cache_path('MP_persist'))
shutil.rmtree(cache_path('MP_spinfo'))
shutil.rmtree(cache_path('MP_stolist'))
shutil.rmtree(cache_path('MP_tradelist'))
shutil.rmtree(cache_path('MP_txlist'))
shutil.rmtree(cache_path('indexes')) # for txindex enabled by default in Omni
for entry in os.listdir(cache_path()):
if entry not in ['chainstate', 'blocks']: # Only keep chainstate and blocks folder
os.remove(cache_path(entry))
for i in range(self.num_nodes):
self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(cache_node_dir, to_dir)
initialize_datadir(self.options.tmpdir, i, self.chain) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i, self.chain)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("bitcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_wallet_tool(self):
"""Skip the running test if bitcoin-wallet has not been compiled."""
if not self.is_wallet_tool_compiled():
raise SkipTest("bitcoin-wallet has not been compiled")
def skip_if_no_cli(self):
"""Skip the running test if bitcoin-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("bitcoin-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether bitcoin-cli was compiled."""
return self.config["components"].getboolean("ENABLE_CLI")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
return self.config["components"].getboolean("ENABLE_WALLET")
def is_wallet_tool_compiled(self):
"""Checks whether bitcoin-wallet was compiled."""
return self.config["components"].getboolean("ENABLE_WALLET_TOOL")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
return self.config["components"].getboolean("ENABLE_ZMQ")
|
|
from typing import Any, Dict, List, Optional, Union
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import Batch, BatchRequest, RuntimeBatchRequest
from great_expectations.core.profiler_types_mapping import ProfilerTypeMapping
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.rule_based_profiler.domain_builder import ColumnDomainBuilder
from great_expectations.rule_based_profiler.helpers.util import (
get_parameter_value_and_validate_return_type,
)
from great_expectations.rule_based_profiler.types import (
Domain,
InferredSemanticDomainType,
ParameterContainer,
SemanticDomainTypes,
)
from great_expectations.validator.metric_configuration import MetricConfiguration
class SimpleSemanticTypeColumnDomainBuilder(ColumnDomainBuilder):
"""
This DomainBuilder utilizes a "best-effort" semantic interpretation of ("storage") columns of a table.
"""
def __init__(
self,
batch_list: Optional[List[Batch]] = None,
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest, dict]] = None,
data_context: Optional["DataContext"] = None, # noqa: F821
column_names: Optional[Union[str, Optional[List[str]]]] = None,
semantic_types: Optional[
Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
] = None,
):
"""
Args:
batch_list: explicitly specified Batch objects for use in DomainBuilder
batch_request: specified in DomainBuilder configuration to get Batch objects for domain computation.
data_context: DataContext
column_names: Explicitly specified column_names list desired (if None, it is computed based on active Batch)
semantic_types: single or multiple type specifications using SemanticDomainTypes (or string equivalents)
"""
super().__init__(
batch_list=batch_list,
batch_request=batch_request,
data_context=data_context,
column_names=column_names,
)
if semantic_types is None:
semantic_types = []
self._semantic_types = semantic_types
@property
def domain_type(self) -> Union[str, MetricDomainTypes]:
return MetricDomainTypes.COLUMN
@property
def semantic_types(
self,
) -> Optional[
Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
]:
return self._semantic_types
def _get_domains(
self,
variables: Optional[ParameterContainer] = None,
) -> List[Domain]:
"""
Find the semantic column type for each column and return all domains matching the specified type or types.
"""
table_column_names: List[str] = self.get_effective_column_names(
include_columns=self.column_names,
exclude_columns=None,
variables=variables,
)
# Obtain semantic_types from "rule state" (i.e., variables and parameters); from instance variable otherwise.
semantic_types: Union[
str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]
] = get_parameter_value_and_validate_return_type(
domain=None,
parameter_reference=self.semantic_types,
expected_return_type=None,
variables=variables,
parameters=None,
)
semantic_types: List[
SemanticDomainTypes
] = _parse_semantic_domain_type_argument(semantic_types=semantic_types)
batch_ids: List[str] = self.get_batch_ids(variables=variables)
column_types_dict_list: List[Dict[str, Any]] = self.get_validator(
variables=variables
).get_metric(
metric=MetricConfiguration(
metric_name="table.column_types",
metric_domain_kwargs={
"batch_id": batch_ids[-1], # active_batch_id
},
metric_value_kwargs={
"include_nested": True,
},
metric_dependencies=None,
)
)
column_name: str
# A semantic type is distinguished from the structured column type;
# An example structured column type would be "integer". The inferred semantic type would be "id".
table_column_name_to_inferred_semantic_domain_type_mapping: Dict[
str, SemanticDomainTypes
] = {
column_name: self.infer_semantic_domain_type_from_table_column_type(
column_types_dict_list=column_types_dict_list,
column_name=column_name,
).semantic_domain_type
for column_name in table_column_names
}
candidate_column_names: List[str] = list(
filter(
lambda candidate_column_name: table_column_name_to_inferred_semantic_domain_type_mapping[
candidate_column_name
]
in semantic_types,
table_column_names,
)
)
domains: List[Domain] = [
Domain(
domain_type=self.domain_type,
domain_kwargs={
"column": column_name,
},
details={
"inferred_semantic_domain_type": table_column_name_to_inferred_semantic_domain_type_mapping[
column_name
],
},
)
for column_name in candidate_column_names
]
return domains
# This method (default implementation) can be overwritten (with different implementation mechanisms) by subclasses.
# noinspection PyMethodMayBeStatic
def infer_semantic_domain_type_from_table_column_type(
self,
column_types_dict_list: List[Dict[str, Any]],
column_name: str,
) -> InferredSemanticDomainType:
# Note: As of Python 3.8, specifying argument type in Lambda functions is not supported by Lambda syntax.
column_types_dict_list = list(
filter(
lambda column_type_dict: column_name == column_type_dict["name"],
column_types_dict_list,
)
)
if len(column_types_dict_list) != 1:
raise ge_exceptions.ProfilerExecutionError(
message=f"""Error: {len(column_types_dict_list)} columns were found while obtaining semantic type \
information. Please ensure that the specified column name refers to exactly one column.
"""
)
column_type: str = str(column_types_dict_list[0]["type"]).upper()
semantic_column_type: SemanticDomainTypes
if column_type in (
{type_name.upper() for type_name in ProfilerTypeMapping.INT_TYPE_NAMES}
| {type_name.upper() for type_name in ProfilerTypeMapping.FLOAT_TYPE_NAMES}
):
semantic_column_type = SemanticDomainTypes.NUMERIC
elif column_type in {
type_name.upper() for type_name in ProfilerTypeMapping.STRING_TYPE_NAMES
}:
semantic_column_type = SemanticDomainTypes.TEXT
elif column_type in {
type_name.upper() for type_name in ProfilerTypeMapping.BOOLEAN_TYPE_NAMES
}:
semantic_column_type = SemanticDomainTypes.LOGIC
elif column_type in {
type_name.upper() for type_name in ProfilerTypeMapping.DATETIME_TYPE_NAMES
}:
semantic_column_type = SemanticDomainTypes.DATETIME
elif column_type in {
type_name.upper() for type_name in ProfilerTypeMapping.BINARY_TYPE_NAMES
}:
semantic_column_type = SemanticDomainTypes.BINARY
elif column_type in {
type_name.upper() for type_name in ProfilerTypeMapping.CURRENCY_TYPE_NAMES
}:
semantic_column_type = SemanticDomainTypes.CURRENCY
elif column_type in {
type_name.upper() for type_name in ProfilerTypeMapping.IDENTIFIER_TYPE_NAMES
}:
semantic_column_type = SemanticDomainTypes.IDENTIFIER
elif column_type in (
{
type_name.upper()
for type_name in ProfilerTypeMapping.MISCELLANEOUS_TYPE_NAMES
}
| {type_name.upper() for type_name in ProfilerTypeMapping.RECORD_TYPE_NAMES}
):
semantic_column_type = SemanticDomainTypes.MISCELLANEOUS
else:
semantic_column_type = SemanticDomainTypes.UNKNOWN
inferred_semantic_column_type: InferredSemanticDomainType = (
InferredSemanticDomainType(
semantic_domain_type=semantic_column_type,
details={
"algorithm_type": "deterministic",
"mechanism": "lookup_table",
"source": "great_expectations.profile.base.ProfilerTypeMapping",
},
)
)
return inferred_semantic_column_type
def _parse_semantic_domain_type_argument(
semantic_types: Optional[
Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
] = None
) -> List[SemanticDomainTypes]:
if semantic_types is None:
return []
semantic_type: Union[str, SemanticDomainTypes]
if isinstance(semantic_types, str):
semantic_types = semantic_types.upper()
return [
SemanticDomainTypes[semantic_type] for semantic_type in [semantic_types]
]
if isinstance(semantic_types, SemanticDomainTypes):
return [semantic_type for semantic_type in [semantic_types]]
elif isinstance(semantic_types, list):
if all([isinstance(semantic_type, str) for semantic_type in semantic_types]):
semantic_types = [semantic_type.upper() for semantic_type in semantic_types]
return [
SemanticDomainTypes[semantic_type] for semantic_type in semantic_types
]
elif all(
[
isinstance(semantic_type, SemanticDomainTypes)
for semantic_type in semantic_types
]
):
return [semantic_type for semantic_type in semantic_types]
else:
raise ValueError(
"All elements in semantic_types list must be either of str or SemanticDomainTypes type."
)
else:
raise ValueError("Unrecognized semantic_types directive.")
|
|
"""
This module contains all tests for glance_api.modules.functions.py
"""
import os
import pytest
import requests
import sqlalchemy
from glance_api.modules import functions
from glance_api.modules import models
from glance_api import api
# TODO: Finish testing Item
# TODO: Currently using sqlite3 database for tests, need to use postgres instead
# TODO: Figure out how to make test database in postgres programmatically.
@pytest.fixture(scope='session')
def connection(request):
db_name = 'sqlite_test_database.db'
engine = sqlalchemy.create_engine(f'sqlite:///tests/{db_name}')
models.Base.metadata.create_all(engine)
connection = engine.connect()
api.session.registry.clear()
api.session.configure(bind=connection)
models.Base.metadata.bind = engine
request.addfinalizer(models.Base.metadata.drop_all)
return connection
@pytest.fixture
def db_session(request, connection):
trans = connection.begin()
request.addfinalizer(trans.rollback)
from glance_api.api import session
return session
def test_Item_with_no_session():
with pytest.raises(TypeError):
functions.Item()
def test_Item_tags_from_queries_returns_type_list(db_session):
test_data = {'filter': 'image', 'filter_people': None, 'query': 'animal'}
test_method = functions.Item(db_session)._tags_from_queries(test_data)
assert type(test_method) == list
def test_Item_tags_from_queries_no_tags(db_session):
test_data = {'filter': 'image', 'filter_people': None, 'query': 'TEST_TAGS'}
test_method = functions.Item(db_session)._tags_from_queries(test_data)
assert len(test_method) == 0
def test_Item_tags_from_queries_tags(db_session):
test_query = {'filter': 'image', 'filter_people': None, 'query': ''}
test_tags = ['TEST_TAG_ONE', 'TEST_TAG_TWO', 'TEST_TAG_THREE']
for tag in test_tags:
new_tag = models.Tag(name=tag)
db_session.add(new_tag)
test_method = functions.Item(db_session)._tags_from_queries(test_query)
assert len(test_method) == 3
def test_Item_tags_from_queries_query(db_session):
test_query = {'filter': '', 'filter_people': None, 'query': 'querytag notfoundtag'}
test_tags = ['_one', '_group', 'querytag', 'notfoundtag']
for tag in test_tags:
new_tag = models.Tag(name=tag)
db_session.add(new_tag)
test_method = functions.Item(db_session)._tags_from_queries(test_query)
assert len(test_method) == 2
def test_Item_tags_from_queries_filter_people(db_session):
test_query = {'filter': 'people', 'filter_people': '_one _group', 'query': 'none'}
test_tags = ['_one', '_group', 'querytag', 'notfoundtag']
for tag in test_tags:
new_tag = models.Tag(name=tag)
db_session.add(new_tag)
test_method = functions.Item(db_session)._tags_from_queries(test_query)
assert len(test_method) == 2
def test_Item_tags_from_queries_filter_people_and_query(db_session):
test_query = {'filter': 'people', 'filter_people': '_one _group', 'query': 'querytag'}
test_tags = ['_one', '_group', 'querytag', 'notfoundtag']
for tag in test_tags:
new_tag = models.Tag(name=tag)
db_session.add(new_tag)
test_method = functions.Item(db_session)._tags_from_queries(test_query)
assert len(test_method) == 3
def test_Item_filter_tags_returns_list(db_session):
test_query = {'filter': 'image', 'filter_people': None, 'query': ''}
test_tags = ['TEST_TAG_ONE', 'TEST_TAG_TWO', 'TEST_TAG_THREE']
for tag in test_tags:
new_tag = models.Tag(name=tag)
db_session.add(new_tag)
tags = db_session.query(models.Tag).all()
test_method = functions.Item(db_session)._filter_tags(test_query, tags)
assert type(test_method) == list
def test_Item_filter_tags_image_has_tags(db_session):
test_tags = ['TEST_TAG_ONE', 'TEST_TAG_TWO', 'TEST_TAG_THREE']
test_query = {'filter': 'image', 'filter_people': None, 'query': ' '.join(test_tags)}
new_image = models.Image(name='test')
for tag in test_tags:
new_tag = models.Tag(name=tag)
db_session.add(new_tag)
get_tag = db_session.query(models.Tag).filter_by(name=test_tags[0]).first()
new_image.tags.append(get_tag)
db_session.add(new_image)
test_new_tag = db_session.query(models.Tag).all()
test_method = functions.Item(db_session)._filter_tags(test_query, test_new_tag)
assert len(test_method) == 1
def test_Item_filter_tags_image_has_no_tags(db_session):
test_tags = ['TEST_TAG_ONE', 'TEST_TAG_TWO', 'TEST_TAG_THREE']
test_query = {'filter': 'image', 'filter_people': None, 'query': ' '.join(test_tags)}
new_image = models.Image(name='test')
db_session.add(new_image)
for tag in test_tags:
new_tag = models.Tag(name=tag)
db_session.add(new_tag)
test_new_tag = db_session.query(models.Tag).all()
test_method = functions.Item(db_session)._filter_tags(test_query, test_new_tag)
assert len(test_method) == 0
def test_Item_filter_tags_no_filter(db_session):
test_tags = ['TEST_TAG_ONE', 'TEST_TAG_TWO', 'TEST_TAG_THREE']
test_query = {'filter': None, 'filter_people': None, 'query': ' '.join(test_tags)}
new_image = models.Image(name='test')
new_footage = models.Footage(name='test')
db_session.add(new_image)
db_session.add(new_footage)
for tag in test_tags:
new_tag = models.Tag(name=tag)
db_session.add(new_tag)
get_tag_one = db_session.query(models.Tag).filter_by(name=test_tags[0]).first()
get_tag_two = db_session.query(models.Tag).filter_by(name=test_tags[0]).first()
new_image.tags.append(get_tag_one)
new_footage.tags.append(get_tag_two)
test_new_tag = db_session.query(models.Tag).all()
test_method = functions.Item(db_session)._filter_tags(test_query, test_new_tag)
assert len(test_method) == 2
def test_Item_get_id_does_not_exists(db_session):
test_data = {'id': 999, 'query': None, 'filter_people': None}
test_method = functions.Item(db_session).get(id=test_data['id'])
assert test_method is None
# TODO: Figure out how to make a test database in postgres programmatically
# for the following tests (an illustrative sketch follows at the end of this module).
"""
def test_Item_get_id_does_exists(db_session):
test_data = {'id': 1, 'query': None, 'filter_people': None}
new_item = models.Item(type='image')
db_session.add(new_item)
test_method = functions.Item(db_session).get(id=test_data['id'])
assert test_method == True
def test_Item_delete():
pass
def test_Item_post():
pass
def test_Item_patch():
pass
"""
|
|
import decimal
import json
import unittest
import uuid
from django import forms
from django.core import exceptions, serializers, validators
from django.core.exceptions import FieldError
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TransactionTestCase, modify_settings, override_settings
from django.test.utils import isolate_apps
from django.utils import timezone
from . import PostgreSQLTestCase, PostgreSQLWidgetTestCase
from .models import (
ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel,
NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel,
PostgreSQLModel, Tag,
)
try:
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import (
SimpleArrayField, SplitArrayField, SplitArrayWidget,
)
except ImportError:
pass
class TestSaveLoad(PostgreSQLTestCase):
def test_integer(self):
instance = IntegerArrayModel(field=[1, 2, 3])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_char(self):
instance = CharArrayModel(field=['hello', 'goodbye'])
instance.save()
loaded = CharArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_dates(self):
instance = DateTimeArrayModel(
datetimes=[timezone.now()],
dates=[timezone.now().date()],
times=[timezone.now().time()],
)
instance.save()
loaded = DateTimeArrayModel.objects.get()
self.assertEqual(instance.datetimes, loaded.datetimes)
self.assertEqual(instance.dates, loaded.dates)
self.assertEqual(instance.times, loaded.times)
def test_tuples(self):
instance = IntegerArrayModel(field=(1,))
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertSequenceEqual(instance.field, loaded.field)
def test_integers_passed_as_strings(self):
# This checks that get_prep_value is deferred properly
instance = IntegerArrayModel(field=['1'])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(loaded.field, [1])
def test_default_null(self):
instance = NullableIntegerArrayModel()
instance.save()
loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
self.assertIsNone(loaded.field)
self.assertEqual(instance.field, loaded.field)
def test_null_handling(self):
instance = NullableIntegerArrayModel(field=None)
instance.save()
loaded = NullableIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
instance = IntegerArrayModel(field=None)
with self.assertRaises(IntegrityError):
instance.save()
def test_nested(self):
instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
instance.save()
loaded = NestedIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_other_array_types(self):
instance = OtherTypesArrayModel(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=[Tag(1), Tag(2), Tag(3)],
)
instance.save()
loaded = OtherTypesArrayModel.objects.get()
self.assertEqual(instance.ips, loaded.ips)
self.assertEqual(instance.uuids, loaded.uuids)
self.assertEqual(instance.decimals, loaded.decimals)
self.assertEqual(instance.tags, loaded.tags)
def test_null_from_db_value_handling(self):
instance = OtherTypesArrayModel.objects.create(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=None,
)
instance.refresh_from_db()
self.assertIsNone(instance.tags)
def test_model_set_on_base_field(self):
instance = IntegerArrayModel()
field = instance._meta.get_field('field')
self.assertEqual(field.model, IntegerArrayModel)
self.assertEqual(field.base_field.model, IntegerArrayModel)
class TestQuerying(PostgreSQLTestCase):
def setUp(self):
self.objs = [
NullableIntegerArrayModel.objects.create(field=[1]),
NullableIntegerArrayModel.objects.create(field=[2]),
NullableIntegerArrayModel.objects.create(field=[2, 3]),
NullableIntegerArrayModel.objects.create(field=[20, 30, 40]),
NullableIntegerArrayModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]),
self.objs[:1]
)
def test_exact_charfield(self):
instance = CharArrayModel.objects.create(field=['text'])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field=['text']),
[instance]
)
def test_exact_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]),
[instance]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True),
self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]),
self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]),
self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2]
)
@unittest.expectedFailure
def test_in_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg2 mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[models.F('id')]]),
self.objs[:2]
)
def test_in_as_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[models.F('field')]),
self.objs[:4]
)
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2]
)
@unittest.expectedFailure
def test_contained_by_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg2 mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[models.F('id'), 2]),
self.objs[:2]
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3]
)
def test_icontains(self):
# Using the __icontains lookup with ArrayField is inefficient.
instance = CharArrayModel.objects.create(field=['FoO'])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__icontains='foo'),
[instance]
)
def test_contains_charfield(self):
# Regression for #22907
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contains=['text']),
[]
)
def test_contained_by_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contained_by=['text']),
[]
)
def test_overlap_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__overlap=['text']),
[]
)
def test_index(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0=2),
self.objs[1:3]
)
def test_index_chained(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0__lt=3),
self.objs[0:3]
)
def test_index_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0=1),
[instance]
)
@unittest.expectedFailure
def test_index_used_on_nested_data(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0=[1, 2]),
[instance]
)
def test_overlap(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
self.objs[0:3]
)
def test_len(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len__lte=2),
self.objs[0:3]
)
def test_len_empty_array(self):
obj = NullableIntegerArrayModel.objects.create(field=[])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len=0),
[obj]
)
def test_slice(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_1=[2]),
self.objs[1:3]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]),
self.objs[2:3]
)
@unittest.expectedFailure
def test_slice_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]),
[instance]
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
),
[self.objs[3]]
)
def test_unsupported_lookup(self):
msg = "Unsupported lookup '0_bar' for ArrayField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0_bar=[2]))
msg = "Unsupported lookup '0bar' for ArrayField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0bar=[2]))
class TestDateTimeExactQuerying(PostgreSQLTestCase):
def setUp(self):
now = timezone.now()
self.datetimes = [now]
self.dates = [now.date()]
self.times = [now.time()]
self.objs = [
DateTimeArrayModel.objects.create(
datetimes=self.datetimes,
dates=self.dates,
times=self.times,
)
]
def test_exact_datetimes(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(datetimes=self.datetimes),
self.objs
)
def test_exact_dates(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(dates=self.dates),
self.objs
)
def test_exact_times(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(times=self.times),
self.objs
)
class TestOtherTypesExactQuerying(PostgreSQLTestCase):
def setUp(self):
self.ips = ['192.168.0.1', '::1']
self.uuids = [uuid.uuid4()]
self.decimals = [decimal.Decimal(1.25), 1.75]
self.tags = [Tag(1), Tag(2), Tag(3)]
self.objs = [
OtherTypesArrayModel.objects.create(
ips=self.ips,
uuids=self.uuids,
decimals=self.decimals,
tags=self.tags,
)
]
def test_exact_ip_addresses(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(ips=self.ips),
self.objs
)
def test_exact_uuids(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(uuids=self.uuids),
self.objs
)
def test_exact_decimals(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(decimals=self.decimals),
self.objs
)
def test_exact_tags(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(tags=self.tags),
self.objs
)
@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLTestCase):
def test_field_checks(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.CharField())
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, 'postgres.E001')
self.assertIn('max_length', errors[0].msg)
def test_invalid_base_fields(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel'))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'postgres.E002')
def test_nested_field_checks(self):
"""
Nested ArrayFields are permitted.
"""
class MyModel(PostgreSQLModel):
field = ArrayField(ArrayField(models.CharField()))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, 'postgres.E001')
self.assertIn('max_length', errors[0].msg)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
available_apps = ['postgres_tests']
def test_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(type(new.base_field), type(field.base_field))
self.assertIsNot(new.base_field, field.base_field)
def test_deconstruct_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.size, field.size)
def test_deconstruct_args(self):
field = ArrayField(models.CharField(max_length=20))
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.base_field.max_length, field.base_field.max_length)
def test_subclass_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField')
field = ArrayFieldSubclass()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass')
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_default_migrations",
})
def test_adding_field_with_default(self):
# See #22962
table_name = 'postgres_tests_integerarraydefaultmodel'
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', verbosity=0)
with connection.cursor() as cursor:
self.assertIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_index_migrations",
})
def test_adding_arrayfield_with_index(self):
"""
ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes.
"""
table_name = 'postgres_tests_chartextarrayindexmodel'
call_command('migrate', 'postgres_tests', verbosity=0)
with connection.cursor() as cursor:
like_constraint_columns_list = [
v['columns']
for k, v in list(connection.introspection.get_constraints(cursor, table_name).items())
if k.endswith('_like')
]
# Only the CharField should have a LIKE index.
self.assertEqual(like_constraint_columns_list, [['char2']])
# All fields should have regular indexes.
with connection.cursor() as cursor:
indexes = [
c['columns'][0]
for c in connection.introspection.get_constraints(cursor, table_name).values()
if c['index'] and len(c['columns']) == 1
]
self.assertIn('char', indexes)
self.assertIn('char2', indexes)
self.assertIn('text', indexes)
call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLTestCase):
test_data = (
'[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]'
)
def test_dumping(self):
instance = IntegerArrayModel(field=[1, 2, None])
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, [1, 2, None])
class TestValidation(PostgreSQLTestCase):
def test_unbounded(self):
field = ArrayField(models.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, None], None)
self.assertEqual(cm.exception.code, 'item_invalid')
self.assertEqual(
cm.exception.message % cm.exception.params,
'Item 1 in the array did not validate: This field cannot be null.'
)
def test_blank_true(self):
field = ArrayField(models.IntegerField(blank=True, null=True))
# This should not raise a validation error
field.clean([1, None], None)
def test_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
field.clean([1, 2, 3], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, 2, 3, 4], None)
self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.')
def test_nested_array_mismatch(self):
field = ArrayField(ArrayField(models.IntegerField()))
field.clean([[1, 2], [3, 4]], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([[1, 2], [3, 4, 5]], None)
self.assertEqual(cm.exception.code, 'nested_array_mismatch')
self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.')
def test_with_base_field_error_params(self):
field = ArrayField(models.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['abc'], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
)
self.assertEqual(exception.code, 'item_invalid')
self.assertEqual(exception.params, {'nth': 0, 'value': 'abc', 'limit_value': 2, 'show_value': 3})
def test_with_validators(self):
field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)]))
field.clean([1, 2], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([0], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
'Item 0 in the array did not validate: Ensure this value is greater than or equal to 1.'
)
self.assertEqual(exception.code, 'item_invalid')
self.assertEqual(exception.params, {'nth': 0, 'value': 0, 'limit_value': 1, 'show_value': 0})
class TestSimpleFormField(PostgreSQLTestCase):
def test_valid(self):
field = SimpleArrayField(forms.CharField())
value = field.clean('a,b,c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_to_python_fail(self):
field = SimpleArrayField(forms.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,9')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.')
def test_validate_fail(self):
field = SimpleArrayField(forms.CharField(required=True))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,')
self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.')
def test_validate_fail_base_field_error_params(self):
field = SimpleArrayField(forms.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('abc,c,defg')
errors = cm.exception.error_list
self.assertEqual(len(errors), 2)
first_error = errors[0]
self.assertEqual(
first_error.message,
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
)
self.assertEqual(first_error.code, 'item_invalid')
self.assertEqual(first_error.params, {'nth': 0, 'value': 'abc', 'limit_value': 2, 'show_value': 3})
second_error = errors[1]
self.assertEqual(
second_error.message,
'Item 2 in the array did not validate: Ensure this value has at most 2 characters (it has 4).'
)
self.assertEqual(second_error.code, 'item_invalid')
self.assertEqual(second_error.params, {'nth': 2, 'value': 'defg', 'limit_value': 2, 'show_value': 4})
def test_validators_fail(self):
field = SimpleArrayField(forms.RegexField('[a-e]{2}'))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,bc,de')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.')
def test_delimiter(self):
field = SimpleArrayField(forms.CharField(), delimiter='|')
value = field.clean('a|b|c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_delimiter_with_nesting(self):
field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|')
value = field.clean('a,b|c,d')
self.assertEqual(value, [['a', 'b'], ['c', 'd']])
def test_prepare_value(self):
field = SimpleArrayField(forms.CharField())
value = field.prepare_value(['a', 'b', 'c'])
self.assertEqual(value, 'a,b,c')
def test_max_length(self):
field = SimpleArrayField(forms.CharField(), max_length=2)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.')
def test_min_length(self):
field = SimpleArrayField(forms.CharField(), min_length=4)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.')
def test_required(self):
field = SimpleArrayField(forms.CharField(), required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('')
self.assertEqual(cm.exception.messages[0], 'This field is required.')
def test_model_field_formfield(self):
model_field = ArrayField(models.CharField(max_length=27))
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertIsInstance(form_field.base_field, forms.CharField)
self.assertEqual(form_field.base_field.max_length, 27)
def test_model_field_formfield_size(self):
model_field = ArrayField(models.CharField(max_length=27), size=4)
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertEqual(form_field.max_length, 4)
def test_already_converted_value(self):
field = SimpleArrayField(forms.CharField())
vals = ['a', 'b', 'c']
self.assertEqual(field.clean(vals), vals)
class TestSplitFormField(PostgreSQLTestCase):
def test_valid(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']})
def test_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), required=True, size=3)
data = {'array_0': '', 'array_1': '', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['This field is required.']})
def test_remove_trailing_nulls(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True)
data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']})
def test_remove_trailing_nulls_not_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False),
size=2,
remove_trailing_nulls=True,
required=False,
)
data = {'array_0': '', 'array_1': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': []})
def test_required_field(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']})
def test_invalid_integer(self):
msg = 'Item 1 in the array did not validate: Ensure this value is less than or equal to 100.'
with self.assertRaisesMessage(exceptions.ValidationError, msg):
SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])
# To locate the widget's template.
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'})
def test_rendering(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
self.assertHTMLEqual(str(SplitForm()), '''
<tr>
<th><label for="id_array_0">Array:</label></th>
<td>
<input id="id_array_0" name="array_0" type="text" required />
<input id="id_array_1" name="array_1" type="text" required />
<input id="id_array_2" name="array_2" type="text" required />
</td>
</tr>
''')
def test_invalid_char_length(self):
field = SplitArrayField(forms.CharField(max_length=2), size=3)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['abc', 'c', 'defg'])
self.assertEqual(cm.exception.messages, [
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).',
'Item 2 in the array did not validate: Ensure this value has at most 2 characters (it has 4).',
])
def test_splitarraywidget_value_omitted_from_data(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ('field',)
form = Form({'field_0': '1', 'field_1': '2'})
self.assertEqual(form.errors, {})
obj = form.save(commit=False)
self.assertEqual(obj.field, [1, 2])
class TestSplitFormWidget(PostgreSQLWidgetTestCase):
def test_get_context(self):
self.assertEqual(
SplitArrayWidget(forms.TextInput(), size=2).get_context('name', ['val1', 'val2']),
{
'widget': {
'name': 'name',
'is_hidden': False,
'required': False,
'value': "['val1', 'val2']",
'attrs': {},
'template_name': 'postgres/widgets/split_array.html',
'subwidgets': [
{
'name': 'name_0',
'is_hidden': False,
'required': False,
'value': 'val1',
'attrs': {},
'template_name': 'django/forms/widgets/text.html',
'type': 'text',
},
{
'name': 'name_1',
'is_hidden': False,
'required': False,
'value': 'val2',
'attrs': {},
'template_name': 'django/forms/widgets/text.html',
'type': 'text',
},
]
}
}
)
def test_checkbox_get_context_attrs(self):
context = SplitArrayWidget(
forms.CheckboxInput(),
size=2,
).get_context('name', [True, False])
self.assertEqual(context['widget']['value'], '[True, False]')
self.assertEqual(
[subwidget['attrs'] for subwidget in context['widget']['subwidgets']],
[{'checked': True}, {}]
)
def test_render(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2), 'array', None,
"""
<input name="array_0" type="text" />
<input name="array_1" type="text" />
"""
)
def test_render_attrs(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2),
'array', ['val1', 'val2'], attrs={'id': 'foo'},
html=(
"""
<input id="foo_0" name="array_0" type="text" value="val1" />
<input id="foo_1" name="array_1" type="text" value="val2" />
"""
)
)
def test_value_omitted_from_data(self):
widget = SplitArrayWidget(forms.TextInput(), size=2)
self.assertIs(widget.value_omitted_from_data({}, {}, 'field'), True)
self.assertIs(widget.value_omitted_from_data({'field_0': 'value'}, {}, 'field'), False)
self.assertIs(widget.value_omitted_from_data({'field_1': 'value'}, {}, 'field'), False)
self.assertIs(widget.value_omitted_from_data({'field_0': 'value', 'field_1': 'value'}, {}, 'field'), False)
|
|
"""
The Daemon Extension enables applications Built on Cement (tm) to
easily perform standard daemonization functions.
Requirements
------------
* Python 2.6+, Python 3+
* Available on Unix/Linux only
Features
--------
* Configurable runtime user and group
* Adds the ``--daemon`` command line option
* Adds ``app.daemonize()`` function to trigger daemon functionality where
necessary (either in a cement ``pre_run`` hook or an application controller
sub-command, etc)
* Manages a pid file including cleanup on ``app.close()``
Configuration
-------------
The daemon extension is configurable with the following settings under the
[daemon] section.
* **user** - The user name to run the process as.
Default: os.getlogin()
* **group** - The group name to run the process as.
Default: The primary group of the 'user'.
* **dir** - The directory to run the process in.
Default: /
* **pid_file** - The filesystem path to store the PID (Process ID) file.
Default: None
* **umask** - The umask value to pass to os.umask().
Default: 0
Configurations can be passed as defaults to a CementApp:
.. code-block:: python
from cement.core.foundation import CementApp
from cement.utils.misc import init_defaults
defaults = init_defaults('myapp', 'daemon')
defaults['daemon']['user'] = 'myuser'
defaults['daemon']['group'] = 'mygroup'
defaults['daemon']['dir'] = '/var/lib/myapp/'
defaults['daemon']['pid_file'] = '/var/run/myapp/myapp.pid'
defaults['daemon']['umask'] = 0
app = CementApp('myapp', config_defaults=defaults)
Application defaults are then overridden by configurations parsed via a
``[daemon]`` config section in any of the application's configuration paths.
An example configuration block would look like:
.. code-block:: text
[daemon]
user = myuser
group = mygroup
dir = /var/lib/myapp/
pid_file = /var/run/myapp/myapp.pid
umask = 0
Usage
-----
The following example shows how to add the daemon extension, as well as
trigger daemon functionality before ``app.run()`` is called.
.. code-block:: python
from time import sleep
from cement.core.foundation import CementApp
class MyApp(CementApp):
class Meta:
label = 'myapp'
extensions = ['daemon']
with MyApp() as app:
app.daemonize()
app.run()
count = 0
while True:
count = count + 1
print('Iteration: %s' % count)
sleep(10)
An alternative to the above is to put the ``daemonize()`` call within a
framework hook:
.. code-block:: python
def make_daemon(app):
app.daemonize()
def load(app):
app.hook.register('pre_run', make_daemon)
Finally, some applications may prefer to only daemonize certain sub-commands
rather than the entire parent application. For example:
.. code-block:: python
from cement.core.foundation import CementApp
from cement.core.controller import CementBaseController, expose
class MyBaseController(CementBaseController):
class Meta:
label = 'base'
@expose(help="run the daemon command.")
def run_forever(self):
from time import sleep
self.app.daemonize()
count = 0
while True:
count = count + 1
print(count)
sleep(10)
class MyApp(CementApp):
class Meta:
label = 'myapp'
base_controller = MyBaseController
extensions = ['daemon']
with MyApp() as app:
app.run()
By default, even after ``app.daemonize()`` is called... the application will
continue to run in the foreground, but will still manage the pid and
user/group switching. To detach a process and send it to the background you
simply pass the ``--daemon`` option at command line.
.. code-block:: text
$ python example.py --daemon
$ ps -x | grep example
37421 ?? 0:00.01 python example2.py --daemon
37452 ttys000 0:00.00 grep example
"""
import os
import sys
import io
import pwd
import grp
from ..core import exc
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
CEMENT_DAEMON_ENV = None
CEMENT_DAEMON_APP = None
class Environment(object):
"""
This class provides a mechanism for altering the running processes
environment.
Optional Arguments:
:keyword stdin: A file to read STDIN from. Default: ``/dev/null``
:keyword stdout: A file to write STDOUT to. Default: ``/dev/null``
:keyword stderr: A file to write STDERR to. Default: ``/dev/null``
:keyword dir: The directory to run the process in.
:keyword pid_file: The filesystem path to where the PID (Process ID)
should be written to. Default: None
:keyword user: The user name to run the process as.
Default: ``os.getlogin()``
:keyword group: The group name to run the process as.
Default: The primary group of ``os.getlogin()``.
:keyword umask: The umask to pass to os.umask(). Default: ``0``
"""
def __init__(self, **kw):
self.stdin = kw.get('stdin', '/dev/null')
self.stdout = kw.get('stdout', '/dev/null')
self.stderr = kw.get('stderr', '/dev/null')
self.dir = kw.get('dir', os.curdir)
self.pid_file = kw.get('pid_file', None)
self.umask = kw.get('umask', 0)
self.user = kw.get('user', pwd.getpwuid(os.getuid()).pw_name)
# clean up
self.dir = os.path.abspath(os.path.expanduser(self.dir))
if self.pid_file:
self.pid_file = os.path.abspath(os.path.expanduser(self.pid_file))
try:
self.user = pwd.getpwnam(self.user)
except KeyError as e:
raise exc.FrameworkError("Daemon user '%s' doesn't exist." %
self.user)
try:
self.group = kw.get('group',
grp.getgrgid(self.user.pw_gid).gr_name)
self.group = grp.getgrnam(self.group)
except KeyError as e:
raise exc.FrameworkError("Daemon group '%s' doesn't exist." %
self.group)
def _write_pid_file(self):
"""
Writes ``os.getpid()`` out to ``self.pid_file``.
"""
pid = str(os.getpid())
LOG.debug('writing pid (%s) out to %s' % (pid, self.pid_file))
# setup pid
if self.pid_file:
f = open(self.pid_file, 'w')
f.write(pid)
f.close()
os.chown(self.pid_file, self.user.pw_uid, self.group.gr_gid)
def switch(self):
"""
Switch the current process's user/group to ``self.user``, and
``self.group``. Change directory to ``self.dir``, and write the
current pid out to ``self.pid_file``.
"""
# set the running uid/gid
LOG.debug('setting process uid(%s) and gid(%s)' %
(self.user.pw_uid, self.group.gr_gid))
os.setgid(self.group.gr_gid)
os.setuid(self.user.pw_uid)
os.environ['HOME'] = self.user.pw_dir
os.chdir(self.dir)
if self.pid_file and os.path.exists(self.pid_file):
raise exc.FrameworkError("Process already running (%s)" %
self.pid_file)
else:
self._write_pid_file()
def daemonize(self): # pragma: no cover
"""
Fork the current process into a daemon.
References:
UNIX Programming FAQ:
1.7 How do I get my program to act like a daemon?
http://www.unixguide.net/unix/programming/1.7.shtml
http://www.faqs.org/faqs/unix-faq/programmer/faq/
Advanced Programming in the Unix Environment
W. Richard Stevens, 1992, Addison-Wesley, ISBN 0-201-56317-7.
"""
LOG.debug('attempting to daemonize the current process')
# Do first fork.
try:
pid = os.fork()
if pid > 0:
LOG.debug('successfully detached from first parent')
os._exit(os.EX_OK)
except OSError as e:
sys.stderr.write("Fork #1 failed: (%d) %s\n" %
(e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment.
os.chdir(self.dir)
os.umask(int(self.umask))
os.setsid()
# Do second fork.
try:
pid = os.fork()
if pid > 0:
LOG.debug('successfully detached from second parent')
os._exit(os.EX_OK)
except OSError as e:
sys.stderr.write("Fork #2 failed: (%d) %s\n" %
(e.errno, e.strerror))
sys.exit(1)
# Redirect standard file descriptors.
stdin = open(self.stdin, 'r')
stdout = open(self.stdout, 'a+')
stderr = open(self.stderr, 'a+')
if hasattr(sys.stdin, 'fileno'):
try:
os.dup2(stdin.fileno(), sys.stdin.fileno())
except io.UnsupportedOperation as e:
# FIXME: ?
pass
if hasattr(sys.stdout, 'fileno'):
try:
os.dup2(stdout.fileno(), sys.stdout.fileno())
except io.UnsupportedOperation as e:
# FIXME: ?
pass
if hasattr(sys.stderr, 'fileno'):
try:
os.dup2(stderr.fileno(), sys.stderr.fileno())
except io.UnsupportedOperation as e:
# FIXME: ?
pass
# Update our pid file
self._write_pid_file()
def daemonize(): # pragma: no cover
"""
This function switches the running user/group to that configured in
``config['daemon']['user']`` and ``config['daemon']['group']``. The
default user is ``os.getlogin()`` and the default group is that user's
primary group. A pid_file and directory to run in is also passed to the
environment.
It is important to note that with the daemon extension enabled, the
environment will switch user/group/set pid/etc regardless of whether
the ``--daemon`` option was passed at command line or not. However, the
process will only 'daemonize' if the option is passed to do so. This
allows the program to run exactly the same in the foreground or background.
"""
# We want to honor the runtime user/group/etc even if --daemon is not
# passed... but only daemonize if it is.
global CEMENT_DAEMON_ENV
global CEMENT_DAEMON_APP
app = CEMENT_DAEMON_APP
CEMENT_DAEMON_ENV = Environment(
user=app.config.get('daemon', 'user'),
group=app.config.get('daemon', 'group'),
pid_file=app.config.get('daemon', 'pid_file'),
dir=app.config.get('daemon', 'dir'),
umask=app.config.get('daemon', 'umask'),
)
CEMENT_DAEMON_ENV.switch()
if '--daemon' in app.argv:
CEMENT_DAEMON_ENV.daemonize()
def extend_app(app):
"""
Adds the ``--daemon`` argument to the argument object, and sets the
default ``[daemon]`` config section options.
"""
global CEMENT_DAEMON_APP
CEMENT_DAEMON_APP = app
app.args.add_argument('--daemon', dest='daemon',
action='store_true', help='daemonize the process')
# Add default config
user = pwd.getpwuid(os.getuid())
group = grp.getgrgid(user.pw_gid)
defaults = dict()
defaults['daemon'] = dict()
defaults['daemon']['user'] = user.pw_name
defaults['daemon']['group'] = group.gr_name
defaults['daemon']['pid_file'] = None
defaults['daemon']['dir'] = '/'
defaults['daemon']['umask'] = 0
app.config.merge(defaults, override=False)
app.extend('daemonize', daemonize)
def cleanup(app): # pragma: no cover
"""
    After application run time, this hook attempts to clean up the
    pid_file if one was set and still exists.
"""
global CEMENT_DAEMON_ENV
if CEMENT_DAEMON_ENV and CEMENT_DAEMON_ENV.pid_file:
if os.path.exists(CEMENT_DAEMON_ENV.pid_file):
LOG.debug('Cleaning up pid_file...')
pid = open(CEMENT_DAEMON_ENV.pid_file, 'r').read().strip()
# only remove it if we created it.
if int(pid) == int(os.getpid()):
os.remove(CEMENT_DAEMON_ENV.pid_file)
def load(app):
app.hook.register('post_setup', extend_app)
app.hook.register('pre_close', cleanup)
|
|
# This module is dedicated to resolving issues that occur when writing text onto an image:
# - optimize the font size for single-line text
# - optimize the font size AND the character limit (carlim) for multiline text
# - correct the inaccurate text-size estimation for single-line text
# An illustrative usage sketch is appended at the end of this module.
from PIL import Image, ImageFont, ImageDraw
import textwrap
#from ImageEdit import *
def getTrueMetrics(font,color,text):  # get the true position and the true dimensions of the text
imtemp=Image.new("RGB", (10, 10), "black")
drawtemp = ImageDraw.Draw(imtemp)
Approxlimits=drawtemp.textsize(text,font)
print(Approxlimits)
im=Image.new("RGB", (int(Approxlimits[0]*1.3), int(Approxlimits[1]*1.3)), "black")
draw= ImageDraw.Draw(im)
try:
draw.text((Approxlimits[0]*0.15, Approxlimits[1]*0.15), text, fill="white", font=font)
except:
draw.text((Approxlimits[0]*0.15, Approxlimits[1]*0.15), text, font=font)##avoid PIL bug with palette
box=im.getbbox()
trueSize=(box[2]-box[0],box[3]-box[1])
correctionPosition=(Approxlimits[0]*0.15-box[0],Approxlimits[1]*0.15-box[1])
return correctionPosition,trueSize
def paste_imageNoWrap(path,square,font,color,text,correction=True):
im = Image.open(path)
if correction:
correction,size=getTrueMetrics(font,color,text)
im=simpleTextPaste(im,square,font,color,text,correction,size)
return im
def GetLongestWordLength (text):
textlist=text.split()
longestWord=0
for word in textlist:
if(len(word)>longestWord):
longestWord=len(word)
return(longestWord)
def getWrapDimensions(lines,font,fontsize):
widthMax=0
interline=int(fontsize/8)
wrapHeight=0
    for line in lines:  ## accumulate wrapHeight and determine widthMax
width, height = font.getsize(line)
wrapHeight+=(height+interline)
if(width>widthMax):
widthMax=width
return widthMax,wrapHeight
def firstFontEstimation(fontname,nbcar,freeSpace):
fontsize=12
TooSmall=True
font = ImageFont.truetype(fontname, fontsize)
letterArea2=font.getsize("A")
AimedletterArea=freeSpace/(nbcar)
RealletterArea=letterArea2[0]*letterArea2[1]
while (RealletterArea<AimedletterArea):
fontsize+=1
font = ImageFont.truetype(fontname, fontsize)
letterArea2=font.getsize("A")
RealletterArea=letterArea2[0]*letterArea2[1]
return fontsize
def calibrateCarMax(font,widthMax,longestWord):
letterSize=font.getsize("a")[0]
carLim=max(longestWord,int(widthMax/letterSize))
return carLim
def isWrapTooTall(lines,font,fontsizeInit,heightMax):
wrapHeight=getWrapDimensions(lines,font,fontsizeInit)[1]
if(wrapHeight>heightMax):
return True
else:
return False
def isWrapTooLarge(lines,font,fontsizeInit,WidthMax):
wrapWidth=getWrapDimensions(lines,font,fontsizeInit)[0]
if(wrapWidth>WidthMax):
return True
else:
return False
def initCarAndFont(text,fontname,widthMax,heightMax):
fontSizeInit=firstFontEstimation(fontname,len(text),widthMax*heightMax)
font = ImageFont.truetype(fontname, fontSizeInit)
longestWord=GetLongestWordLength(text)
carMax=calibrateCarMax(font,widthMax,longestWord)
return (fontSizeInit,carMax)
def optimize(text,fontname,widthMax,heightMax,fontSizeinit,carlimInit):
fontSize=fontSizeinit
carlim=carlimInit
longestWord=GetLongestWordLength(text)
## print(fontSize,carlim)
converged=False
count=0
while not(converged):
carlimold=carlim
fontSizeold=fontSize
count+=1
if(count<5):
fontSize=NonConservativeOptimizeFontSize(text,fontname,fontSize,heightMax,carlim)
carlim= optimizeCarLim(text,fontname,fontSize,widthMax,carlim,longestWord)
else:
carlimNew= optimizeCarLim(text,fontname,fontSize,widthMax,carlim,longestWord)
fontSizeNew=ConservativeOptimizeFontSize(text,fontname,fontSize,heightMax,widthMax,carlim)
carlim=(carlimNew+carlim)/2#avoid oscillations
fontSize=int((fontSizeNew+fontSize)/2)
converged=((carlimold==carlim)and(fontSizeold==fontSize))or(count==20)
##print(fontSize,carlim)
return (fontSize,carlim)
def optimizeFontSizeAndCarLim(text,fontname,widthMax,heightMax):
(fontSizeinit,carlimInit)=initCarAndFont(text,fontname,widthMax,heightMax)
(fontSize,carlim)=optimize(text,fontname,widthMax,heightMax,fontSizeinit,carlimInit)
return fontSize,carlim
def ConservativeOptimizeFontSizeNoWrap(text,fontname,fontsize,heightMax,widthMax):
font = ImageFont.truetype(fontname, fontsize)
limits = font.getsize(text)
TooLarge=limits[0]>widthMax
TooTall=limits[1]>heightMax
TooBig=TooTall or TooLarge
if TooBig:
while (TooBig):
print(fontsize)
fontsize-=1
font = ImageFont.truetype(fontname, fontsize)
limits = font.getsize(text)
TooLarge=limits[0]>widthMax
TooTall=limits[1]>heightMax
TooBig=TooTall or TooLarge
return fontsize
else:
while not(TooBig):
print(fontsize)
fontsize+=1
font = ImageFont.truetype(fontname, fontsize)
limits = font.getsize(text)
TooLarge=limits[0]>widthMax
TooTall=limits[1]>heightMax
TooBig=TooTall or TooLarge
return fontsize-1
def ConservativeOptimizeFontSize(text,fontname,fontsizeInit,heightMax,widthMax,carMax):
font = ImageFont.truetype(fontname, fontsizeInit)
lines = textwrap.wrap(text, width = carMax)
fontsize=fontsizeInit
TooTall=isWrapTooTall(lines,font,fontsizeInit,heightMax)
TooLarge=isWrapTooLarge(lines,font,fontsize,widthMax)
TooBig=TooTall or TooLarge
if TooBig:
while (TooBig):
fontsize-=1
font = ImageFont.truetype(fontname, fontsize)
TooTall=isWrapTooTall(lines,font,fontsize,heightMax)
TooLarge=isWrapTooLarge(lines,font,fontsize,widthMax)
TooBig=TooTall or TooLarge
return fontsize
else:
while not(TooBig):
fontsize+=1
font = ImageFont.truetype(fontname, fontsize)
TooTall=isWrapTooTall(lines,font,fontsize,heightMax)
TooLarge=isWrapTooLarge(lines,font,fontsize,widthMax)
TooBig=TooTall or TooLarge
return fontsize-1
def NonConservativeOptimizeFontSize(text,fontname,fontsizeInit,heightMax,carMax):
font = ImageFont.truetype(fontname, fontsizeInit)
lines = textwrap.wrap(text, width = carMax)
fontsize=fontsizeInit
TooTall=isWrapTooTall(lines,font,fontsizeInit,heightMax)
if TooTall:
while (TooTall):
fontsize-=1
font = ImageFont.truetype(fontname, fontsize)
TooTall=isWrapTooTall(lines,font,fontsize,heightMax)
return fontsize
else:
while not(TooTall):
fontsize+=1
font = ImageFont.truetype(fontname, fontsize)
TooTall=isWrapTooTall(lines,font,fontsize,heightMax)
return fontsize-1
def optimizeCarLim(text,fontName,fontsize,widthMax,carMaxInit,longestWord):
font = ImageFont.truetype(fontName, fontsize)
lines = textwrap.wrap(text, width = carMaxInit)
TooLarge=isWrapTooLarge(lines,font,fontsize,widthMax)
carMax=carMaxInit
if TooLarge:
while (TooLarge):##avoiding the longest word being cut
if(carMax<longestWord+1):
return(carMax)
carMax-=1
lines = textwrap.wrap(text, width = carMax)
TooLarge=isWrapTooLarge(lines,font,fontsize,widthMax)
return (carMax)
else:
while not(TooLarge):
if(len(text)==carMax):
return(carMax)
carMax+=1
lines = textwrap.wrap(text, width = carMax)
TooLarge=isWrapTooLarge(lines,font,fontsize,widthMax)
return (carMax-1)
def findBestFontSize(square,text,fontName,multiline):
widthLimit=square.end_x-square.start_x
heightLimit=square.end_y-square.start_y
fontSize=12
toobig=False
font = ImageFont.truetype(fontName, fontSize)
if (multiline=="false"):
while not toobig:
limits=font.getsize(text)
if((limits[0]>widthLimit)or(limits[1]>heightLimit)):
toobig=True
else:
fontSize+=1
font = ImageFont.truetype(fontName, fontSize)
return ImageFont.truetype(fontName, fontSize-1),False
else:
        carLim=int(widthLimit/font.getsize("A")[0])  ## needed so carlimprec has a value on the first iteration
while not toobig:
limits=font.getsize(text)
fontHeight=limits[1]
interline=int(fontHeight/8)
            carlimprec=carLim  ## keep the previous carLim for when toobig becomes true
carLim=int(widthLimit/font.getsize("A")[0])
lines = textwrap.wrap(text, width = carLim)
wrapHeight=len(lines)*(fontHeight+interline)
if(wrapHeight>heightLimit):
toobig=True
else:
fontSize+=1
font = ImageFont.truetype(fontName, fontSize)
print(carLim,len(lines),fontHeight,wrapHeight,fontSize-1)
return ImageFont.truetype(fontName, fontSize-1),carlimprec
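# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The font path
# and box dimensions below are assumptions for the sake of the example; swap
# in whatever TrueType font and target box your project actually uses.
if __name__ == "__main__":
    sample_text = ("The quick brown fox jumps over the lazy dog, again and "
                   "again, until the target box is comfortably filled.")
    font_path = "DejaVuSans.ttf"  # hypothetical font file
    box_width, box_height = 400, 200
    # Jointly optimize the font size and the characters-per-line limit so the
    # wrapped text fits inside the 400x200 box.
    font_size, car_lim = optimizeFontSizeAndCarLim(sample_text, font_path,
                                                   box_width, box_height)
    font = ImageFont.truetype(font_path, font_size)
    lines = textwrap.wrap(sample_text, width=car_lim)
    print("font size:", font_size, "chars per line:", car_lim)
    print("\n".join(lines))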
|
|
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Merge results from code-coverage/pgo swarming runs.
This script merges code-coverage/pgo profiles from multiple shards. It also
merges the test results of the shards.
It is functionally similar to merge_steps.py but it accepts the parameters
passed by swarming api.
"""
import argparse
import json
import logging
import os
import subprocess
import sys
import merge_lib as profile_merger
import merge_js_lib as javascript_merger
def _MergeAPIArgumentParser(*args, **kwargs):
"""Parameters passed to this merge script, as per:
https://chromium.googlesource.com/chromium/tools/build/+/main/scripts/slave/recipe_modules/swarming/resources/merge_api.py
"""
parser = argparse.ArgumentParser(*args, **kwargs)
parser.add_argument('--build-properties', help=argparse.SUPPRESS)
parser.add_argument('--summary-json', help=argparse.SUPPRESS)
parser.add_argument('--task-output-dir', help=argparse.SUPPRESS)
parser.add_argument(
'-o', '--output-json', required=True, help=argparse.SUPPRESS)
parser.add_argument('jsons_to_merge', nargs='*', help=argparse.SUPPRESS)
# Custom arguments for this merge script.
parser.add_argument(
'--additional-merge-script', help='additional merge script to run')
parser.add_argument(
'--additional-merge-script-args',
help='JSON serialized string of args for the additional merge script')
parser.add_argument(
'--profdata-dir', required=True, help='where to store the merged data')
parser.add_argument(
'--llvm-profdata', required=True, help='path to llvm-profdata executable')
parser.add_argument('--test-target-name', help='test target name')
parser.add_argument(
'--java-coverage-dir', help='directory for Java coverage data')
parser.add_argument(
'--jacococli-path', help='path to jacococli.jar.')
parser.add_argument(
'--merged-jacoco-filename',
help='filename used to uniquely name the merged exec file.')
parser.add_argument(
'--javascript-coverage-dir',
help='directory for JavaScript coverage data')
parser.add_argument(
'--merged-js-cov-filename', help='filename to uniquely identify merged '
'json coverage data')
parser.add_argument(
'--per-cl-coverage',
action='store_true',
help='set to indicate that this is a per-CL coverage build')
parser.add_argument(
'--sparse',
action='store_true',
dest='sparse',
help='run llvm-profdata with the sparse flag.')
# (crbug.com/1091310) - IR PGO is incompatible with the initial conversion
# of .profraw -> .profdata that's run to detect validation errors.
# Introducing a bypass flag that'll merge all .profraw directly to .profdata
parser.add_argument(
'--skip-validation',
action='store_true',
help='skip validation for good raw profile data. this will pass all '
'raw profiles found to llvm-profdata to be merged. only applicable '
'when input extension is .profraw.')
return parser
def main():
desc = "Merge profraw files in <--task-output-dir> into a single profdata."
parser = _MergeAPIArgumentParser(description=desc)
params = parser.parse_args()
if params.java_coverage_dir:
if not params.jacococli_path:
parser.error('--jacococli-path required when merging Java coverage')
if not params.merged_jacoco_filename:
parser.error(
'--merged-jacoco-filename required when merging Java coverage')
output_path = os.path.join(
params.java_coverage_dir, '%s.exec' % params.merged_jacoco_filename)
logging.info('Merging JaCoCo .exec files to %s', output_path)
profile_merger.merge_java_exec_files(
params.task_output_dir, output_path, params.jacococli_path)
if params.javascript_coverage_dir:
if not params.merged_js_cov_filename:
parser.error('--merged-js-cov-filename required when merging '
'JavaScript coverage')
parsed_scripts = javascript_merger.write_parsed_scripts(
params.task_output_dir)
if parsed_scripts:
logging.info('Raw parsed scripts written out to %s', parsed_scripts)
coverage_dirs = javascript_merger.get_raw_coverage_dirs(
params.task_output_dir)
logging.info(
'Identified directories containing coverage %s', coverage_dirs)
try:
logging.info('Converting raw coverage to istanbul')
javascript_merger.convert_raw_coverage_to_istanbul(
coverage_dirs, parsed_scripts, params.task_output_dir)
istanbul_coverage_dir = os.path.join(params.task_output_dir, 'istanbul')
output_dir = os.path.join(istanbul_coverage_dir, 'merged')
os.makedirs(output_dir)
coverage_file_path = os.path.join(output_dir, 'coverage.json')
logging.info('Merging istanbul reports to %s', coverage_file_path)
javascript_merger.merge_istanbul_reports(
istanbul_coverage_dir, parsed_scripts, coverage_file_path)
except RuntimeError as e:
      logging.warning('Failed executing istanbul tasks: %s', e)
# Ensure JavaScript coverage dir exists.
if not os.path.exists(params.javascript_coverage_dir):
os.makedirs(params.javascript_coverage_dir)
output_path = os.path.join(params.javascript_coverage_dir,
'%s_javascript.json' % params.merged_js_cov_filename)
logging.info('Merging v8 coverage output to %s', output_path)
javascript_merger.merge_coverage_files(params.task_output_dir, output_path)
# Name the output profdata file name as {test_target}.profdata or
# default.profdata.
  output_profdata_filename = (params.test_target_name or 'default') + '.profdata'
# NOTE: The profile data merge script must make sure that the profraw files
# are deleted from the task output directory after merging, otherwise, other
# test results merge script such as layout tests will treat them as json test
# results files and result in errors.
invalid_profiles, counter_overflows = profile_merger.merge_profiles(
params.task_output_dir,
      os.path.join(params.profdata_dir, output_profdata_filename), '.profraw',
params.llvm_profdata,
sparse=params.sparse,
skip_validation=params.skip_validation)
# At the moment counter overflows overlap with invalid profiles, but this is
# not guaranteed to remain the case indefinitely. To avoid future conflicts
# treat these separately.
if counter_overflows:
with open(
os.path.join(params.profdata_dir, 'profiles_with_overflows.json'),
'w') as f:
json.dump(counter_overflows, f)
if invalid_profiles:
with open(os.path.join(params.profdata_dir, 'invalid_profiles.json'),
'w') as f:
json.dump(invalid_profiles, f)
failed = False
# If given, always run the additional merge script, even if we only have one
# output json. Merge scripts sometimes upload artifacts to cloud storage, or
# do other processing which can be needed even if there's only one output.
if params.additional_merge_script:
new_args = [
'--build-properties',
params.build_properties,
'--summary-json',
params.summary_json,
'--task-output-dir',
params.task_output_dir,
'--output-json',
params.output_json,
]
if params.additional_merge_script_args:
new_args += json.loads(params.additional_merge_script_args)
new_args += params.jsons_to_merge
args = [sys.executable, params.additional_merge_script] + new_args
rc = subprocess.call(args)
if rc != 0:
failed = True
logging.warning('Additional merge script %s exited with %s' %
(params.additional_merge_script, rc))
elif len(params.jsons_to_merge) == 1:
logging.info("Only one output needs to be merged; directly copying it.")
with open(params.jsons_to_merge[0]) as f_read:
with open(params.output_json, 'w') as f_write:
f_write.write(f_read.read())
else:
logging.warning(
'This script was told to merge test results, but no additional merge '
'script was given.')
return 1 if (failed or bool(invalid_profiles)) else 0
if __name__ == '__main__':
logging.basicConfig(
format='[%(asctime)s %(levelname)s] %(message)s', level=logging.INFO)
sys.exit(main())
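# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): shows how the parser
# defined above interprets a typical swarming-style command line. Every path
# below is a hypothetical placeholder.
def _example_invocation():  # pragma: no cover
  parser = _MergeAPIArgumentParser()
  return parser.parse_args([
      '--task-output-dir', '/tmp/task_output',      # per-shard swarming outputs
      '--profdata-dir', '/tmp/profdata',            # where the merged .profdata goes
      '--llvm-profdata', '/usr/bin/llvm-profdata',  # merger binary
      '-o', '/tmp/output.json',                     # merged test results JSON
      'shard0_output.json', 'shard1_output.json',   # shard result files to merge
  ])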
|
|
# -*- coding: utf-8 -*-
# Imports
# -------
import time
import common
import hashlib
import logging
import threading
# Data
# ----
# Monitor data indices
K_IP = 0
K_PORT = 1
K_SYSINFO = 2
K_TIMESTAMP = 3
K_RWLOCK = 4
# Contains monitor information { id -> (ip, port, sysinfodao, time, rwlock) }
monitor_db = { }
# Exclusive access to modify monitor_db
monitor_db_lock = common.ReadWriteLock()
# Functions
# ---------
def initializeNewMonitorData(ip, port):
"""initializeNewMonitorData(ip: str, port: str) -> str
Generates a monitor id with the given ip and port and initializes an
entry in the internal db for this new monitor id.
Returns the generated monitor id.
"""
common.assertType(ip, str, "Expected ip to be a string value")
common.assertType(port, str, "Expected port to be a string value")
sha256 = hashlib.sha256()
sha256.update(ip)
sha256.update(port)
mid = sha256.hexdigest()
monitor_db_lock.acquireWrite()
monitor_db[mid] = [ip, port, None, time.time(), common.ReadWriteLock()]
monitor_db_lock.release()
return mid
def updateMonitorData(mid, sinfodao):
"""updateMonitorData(mid: str, sinfodao: SysInfoDAO) -> void
Updates the information of the specified client.
"""
common.assertType(mid, str, "Expeced monitor id to be a string value")
common.assertType(sinfodao, common.SysInfoDAO, "Expected SysInfoDAO")
mdata = _getMonitor(mid)
mdata[K_RWLOCK].acquireWrite()
mdata[K_SYSINFO] = sinfodao
mdata[K_TIMESTAMP] = time.time()
mdata[K_RWLOCK].release()
def getMonitorData(mid):
"""getMonitorData(mid: str) -> SysInfoDAO, str, str
Returns the stored data of a monitor or raises a KeyError exception if the
monitor does not exist.
Returns the tuple (SysInfoDAO, ip, port)
"""
common.assertType(mid, str, "Expected monitor id to be a string value")
mdata = _getMonitor(mid)
mdata[K_RWLOCK].acquireRead()
sinfodao = mdata[K_SYSINFO]
port = mdata[K_PORT]
ip = mdata[K_IP]
mdata[K_RWLOCK].release()
return sinfodao, ip, port
def getAllMonitorsData():
"""getAllMonitorsData() -> [ (SysInfoDAO, str, str) ]
Returns a list with all the SysInfoDAO on the DB.
Returns a list of tuples with (SysInfoDAO, ip, port)
"""
monitor_db_lock.acquireRead()
mdatal = [ (v[K_SYSINFO], v[K_IP], v[K_PORT])
for k, v in monitor_db.iteritems() ]
monitor_db_lock.release()
return mdatal
def getListOfMonitors():
"""getListOfMonitors() -> [str]
Returns a list filled with the id of all the stored monitors.
"""
monitor_db_lock.acquireRead()
midlist = [k for k in monitor_db.iterkeys()]
monitor_db_lock.release()
return midlist
def keepAliveMonitor(mid):
"""keepAliveMonitor(mid: str) -> void
    Prolongs the life of a monitor by 'data_life_time'.
"""
common.assertType(mid, str, "Expected monitor id to be a string value")
mdata = _getMonitor(mid)
mdata[K_RWLOCK].acquireWrite()
mdata[K_TIMESTAMP] = time.time()
mdata[K_RWLOCK].release()
def existsMonitorData(mid):
"""existsMonitorData(mid: str) -> bool
Returns true if the DB contains data of the specified monitor
"""
common.assertType(mid, str, "Expected monitor id to be a string value")
monitor_db_lock.acquireRead()
ret = mid in monitor_db
monitor_db_lock.release()
return ret
# Get an internal reference to a monitor data
def _getMonitor(mid):
monitor_db_lock.acquireRead()
mdata = monitor_db[mid]
monitor_db_lock.release()
return mdata
def removeOldMonitorData(max_time):
"""removeOldMonitorData(max_time: float) -> void
Remove data in the monitor's DB older than max_time seconds.
"""
monitor_db_lock.acquireWrite()
cur_time = time.time()
for mid in monitor_db.keys():
if cur_time - monitor_db[mid][K_TIMESTAMP] > max_time:
del monitor_db[mid]
logging.debug("Droped data of: %s" % mid)
monitor_db_lock.release()
#
## Cleans the database every data life time
class DBGarbageCollector(threading.Thread):
def __init__(self, gc_time, data_life_time):
super(DBGarbageCollector, self).__init__()
self._gc_time = gc_time
self._data_time = data_life_time
self._awakener = threading.Event()
self._mutex = threading.RLock()
self._active = True
## Override
def run(self):
self._setRunState()
self._mutex.acquire() # lock and check active state
while self._active:
self._mutex.release() # unlock and wait
if self._awakener.wait(self._gc_time):
self._awakener.clear() # Resets the internal flag
removeOldMonitorData(self._data_time)
self._mutex.acquire() # lock to check the active state
self._mutex.release() # comes out with the mutex acquired
## Sets internal data to a consistent state before starting the run loop
def _setRunState(self):
self._awakener.clear() # Clears internal flag before first wait
self._mutex.acquire()
self._active = True
self._mutex.release()
@staticmethod
def stop(db_gc):
"""stop() -> void
Given a DBGarbageCollector instance set the stop flag and waits
until the collector thread termination.
"""
db_gc._mutex.acquire()
db_gc._active = False
db_gc._awakener.set()
db_gc._mutex.release()
db_gc.join()
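# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The ip/port and
# timing values are placeholders; a server would typically register monitors,
# start the garbage collector, and stop it again on shutdown roughly like this.
def _example_usage():
    mid = initializeNewMonitorData("127.0.0.1", "8080")      # register a monitor
    keepAliveMonitor(mid)                                     # refresh its timestamp
    db_gc = DBGarbageCollector(gc_time=60, data_life_time=300)
    db_gc.start()                                             # prune stale entries every 60s
    # ... serve requests, call updateMonitorData(mid, dao) as reports arrive ...
    DBGarbageCollector.stop(db_gc)                            # flag stop and join the thread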
|
|
#!/usr/bin/env python
#
# argdist Trace a function and display a distribution of its
# parameter values as a histogram or frequency count.
#
# USAGE: argdist [-h] [-p PID] [-z STRING_SIZE] [-i INTERVAL]
# [-n COUNT] [-v] [-T TOP]
# [-C specifier [specifier ...]]
# [-H specifier [specifier ...]]
# [-I header [header ...]]
#
# Licensed under the Apache License, Version 2.0 (the "License")
# Copyright (C) 2016 Sasha Goldshtein.
from bcc import BPF, Tracepoint, Perf, ProcUtils, USDTReader
from time import sleep, strftime
import argparse
import re
import traceback
import os
import sys
class Probe(object):
next_probe_index = 0
aliases = { "$PID": "bpf_get_current_pid_tgid()" }
def _substitute_aliases(self, expr):
if expr is None:
return expr
for alias, subst in Probe.aliases.items():
expr = expr.replace(alias, subst)
return expr
def _parse_signature(self):
params = map(str.strip, self.signature.split(','))
self.param_types = {}
for param in params:
# If the type is a pointer, the * can be next to the
# param name. Other complex types like arrays are not
# supported right now.
index = param.rfind('*')
index = index if index != -1 else param.rfind(' ')
param_type = param[0:index+1].strip()
param_name = param[index+1:].strip()
self.param_types[param_name] = param_type
def _generate_entry(self):
self.entry_probe_func = self.probe_func_name + "_entry"
text = """
int PROBENAME(struct pt_regs *ctx SIGNATURE)
{
u32 pid = bpf_get_current_pid_tgid();
PID_FILTER
COLLECT
return 0;
}
"""
text = text.replace("PROBENAME", self.entry_probe_func)
text = text.replace("SIGNATURE",
"" if len(self.signature) == 0 else ", " + self.signature)
pid_filter = "" if self.is_user or self.pid is None \
else "if (pid != %d) { return 0; }" % self.pid
text = text.replace("PID_FILTER", pid_filter)
collect = ""
for pname in self.args_to_probe:
param_hash = self.hashname_prefix + pname
if pname == "__latency":
collect += """
u64 __time = bpf_ktime_get_ns();
%s.update(&pid, &__time);
""" % param_hash
else:
collect += "%s.update(&pid, &%s);\n" % \
(param_hash, pname)
text = text.replace("COLLECT", collect)
return text
def _generate_entry_probe(self):
# Any $entry(name) expressions result in saving that argument
# when entering the function.
self.args_to_probe = set()
regex = r"\$entry\((\w+)\)"
for expr in self.exprs:
for arg in re.finditer(regex, expr):
self.args_to_probe.add(arg.group(1))
for arg in re.finditer(regex, self.filter):
self.args_to_probe.add(arg.group(1))
if any(map(lambda expr: "$latency" in expr, self.exprs)) or \
"$latency" in self.filter:
self.args_to_probe.add("__latency")
self.param_types["__latency"] = "u64" # nanoseconds
for pname in self.args_to_probe:
if pname not in self.param_types:
raise ValueError("$entry(%s): no such param" \
                                % pname)
self.hashname_prefix = "%s_param_" % self.probe_hash_name
text = ""
for pname in self.args_to_probe:
# Each argument is stored in a separate hash that is
# keyed by pid.
text += "BPF_HASH(%s, u32, %s);\n" % \
(self.hashname_prefix + pname,
self.param_types[pname])
text += self._generate_entry()
return text
def _generate_retprobe_prefix(self):
# After we're done here, there are __%s_val variables for each
# argument we needed to probe using $entry(name), and they all
# have values (which isn't necessarily the case if we missed
# the method entry probe).
text = "u32 __pid = bpf_get_current_pid_tgid();\n"
self.param_val_names = {}
for pname in self.args_to_probe:
val_name = "__%s_val" % pname
text += "%s *%s = %s.lookup(&__pid);\n" % \
(self.param_types[pname], val_name,
self.hashname_prefix + pname)
text += "if (%s == 0) { return 0 ; }\n" % val_name
self.param_val_names[pname] = val_name
return text
def _replace_entry_exprs(self):
for pname, vname in self.param_val_names.items():
if pname == "__latency":
entry_expr = "$latency"
val_expr = "(bpf_ktime_get_ns() - *%s)" % vname
else:
entry_expr = "$entry(%s)" % pname
val_expr = "(*%s)" % vname
for i in range(0, len(self.exprs)):
self.exprs[i] = self.exprs[i].replace(
entry_expr, val_expr)
self.filter = self.filter.replace(entry_expr,
val_expr)
def _attach_entry_probe(self):
if self.is_user:
self.bpf.attach_uprobe(name=self.library,
sym=self.function,
fn_name=self.entry_probe_func,
pid=self.pid or -1)
else:
self.bpf.attach_kprobe(event=self.function,
fn_name=self.entry_probe_func)
def _bail(self, error):
raise ValueError("error parsing probe '%s': %s" %
(self.raw_spec, error))
def _validate_specifier(self):
# Everything after '#' is the probe label, ignore it
spec = self.raw_spec.split('#')[0]
parts = spec.strip().split(':')
if len(parts) < 3:
self._bail("at least the probe type, library, and " +
"function signature must be specified")
if len(parts) > 6:
self._bail("extraneous ':'-separated parts detected")
if parts[0] not in ["r", "p", "t", "u"]:
self._bail("probe type must be 'p', 'r', 't', or 'u' " +
"but got '%s'" % parts[0])
if re.match(r"\w+\(.*\)", parts[2]) is None:
self._bail(("function signature '%s' has an invalid " +
"format") % parts[2])
def _parse_expr_types(self, expr_types):
if len(expr_types) == 0:
self._bail("no expr types specified")
self.expr_types = expr_types.split(',')
def _parse_exprs(self, exprs):
if len(exprs) == 0:
self._bail("no exprs specified")
self.exprs = exprs.split(',')
def __init__(self, type, specifier, pid):
self.pid = pid
self.raw_spec = specifier
self._validate_specifier()
spec_and_label = specifier.split('#')
self.label = spec_and_label[1] \
if len(spec_and_label) == 2 else None
parts = spec_and_label[0].strip().split(':')
self.type = type # hist or freq
self.probe_type = parts[0]
fparts = parts[2].split('(')
self.function = fparts[0].strip()
if self.probe_type == "t":
self.library = "" # kernel
self.tp_category = parts[1]
self.tp_event = self.function
self.tp = Tracepoint.enable_tracepoint(
self.tp_category, self.tp_event)
self.function = "perf_trace_" + self.function
elif self.probe_type == "u":
self.library = parts[1]
self._find_usdt_probe()
self._enable_usdt_probe()
else:
self.library = parts[1]
self.is_user = len(self.library) > 0
self.signature = fparts[1].strip()[:-1]
self._parse_signature()
# If the user didn't specify an expression to probe, we probe
# the retval in a ret probe, or simply the value "1" otherwise.
self.is_default_expr = len(parts) < 5
if not self.is_default_expr:
self._parse_expr_types(parts[3])
self._parse_exprs(parts[4])
if len(self.exprs) != len(self.expr_types):
self._bail("mismatched # of exprs and types")
if self.type == "hist" and len(self.expr_types) > 1:
self._bail("histograms can only have 1 expr")
else:
if not self.probe_type == "r" and self.type == "hist":
self._bail("histograms must have expr")
self.expr_types = \
["u64" if not self.probe_type == "r" else "int"]
self.exprs = \
["1" if not self.probe_type == "r" else "$retval"]
self.filter = "" if len(parts) != 6 else parts[5]
self._substitute_exprs()
# Do we need to attach an entry probe so that we can collect an
# argument that is required for an exit (return) probe?
def check(expr):
keywords = ["$entry", "$latency"]
return any(map(lambda kw: kw in expr, keywords))
self.entry_probe_required = self.probe_type == "r" and \
(any(map(check, self.exprs)) or check(self.filter))
self.probe_func_name = "%s_probe%d" % \
(self.function, Probe.next_probe_index)
self.probe_hash_name = "%s_hash%d" % \
(self.function, Probe.next_probe_index)
Probe.next_probe_index += 1
def _enable_usdt_probe(self):
if self.usdt.need_enable():
if self.pid is None:
self._bail("probe needs pid to enable")
self.usdt.enable(self.pid)
def _disable_usdt_probe(self):
if self.probe_type == "u" and self.usdt.need_enable():
self.usdt.disable(self.pid)
def close(self):
self._disable_usdt_probe()
def _find_usdt_probe(self):
reader = USDTReader(bin_path=self.library)
for probe in reader.probes:
if probe.name == self.function:
self.usdt = probe
return
self._bail("unrecognized USDT probe %s" % self.function)
def _substitute_exprs(self):
def repl(expr):
expr = self._substitute_aliases(expr)
return expr.replace("$retval", "PT_REGS_RC(ctx)")
for i in range(0, len(self.exprs)):
self.exprs[i] = repl(self.exprs[i])
self.filter = repl(self.filter)
def _is_string(self, expr_type):
return expr_type == "char*" or expr_type == "char *"
def _generate_hash_field(self, i):
if self._is_string(self.expr_types[i]):
return "struct __string_t v%d;\n" % i
else:
return "%s v%d;\n" % (self.expr_types[i], i)
def _generate_field_assignment(self, i):
if self._is_string(self.expr_types[i]):
return (" bpf_probe_read(&__key.v%d.s," +
" sizeof(__key.v%d.s), (void *)%s);\n") % \
(i, i, self.exprs[i])
else:
return " __key.v%d = %s;\n" % (i, self.exprs[i])
def _generate_hash_decl(self):
if self.type == "hist":
return "BPF_HISTOGRAM(%s, %s);" % \
(self.probe_hash_name, self.expr_types[0])
else:
text = "struct %s_key_t {\n" % self.probe_hash_name
for i in range(0, len(self.expr_types)):
text += self._generate_hash_field(i)
text += "};\n"
text += "BPF_HASH(%s, struct %s_key_t, u64);\n" % \
(self.probe_hash_name, self.probe_hash_name)
return text
def _generate_key_assignment(self):
if self.type == "hist":
return "%s __key = %s;\n" % \
(self.expr_types[0], self.exprs[0])
else:
text = "struct %s_key_t __key = {};\n" % \
self.probe_hash_name
for i in range(0, len(self.exprs)):
text += self._generate_field_assignment(i)
return text
def _generate_hash_update(self):
if self.type == "hist":
return "%s.increment(bpf_log2l(__key));" % \
self.probe_hash_name
else:
return "%s.increment(__key);" % self.probe_hash_name
def _generate_pid_filter(self):
# Kernel probes need to explicitly filter pid, because the
# attach interface doesn't support pid filtering
if self.pid is not None and not self.is_user:
return "u32 pid = bpf_get_current_pid_tgid();\n" + \
"if (pid != %d) { return 0; }" % self.pid
else:
return ""
def generate_text(self):
program = ""
probe_text = """
DATA_DECL
QUALIFIER int PROBENAME(struct pt_regs *ctx SIGNATURE)
{
PID_FILTER
PREFIX
if (!(FILTER)) return 0;
KEY_EXPR
COLLECT
return 0;
}
"""
prefix = ""
qualifier = ""
signature = ""
# If any entry arguments are probed in a ret probe, we need
# to generate an entry probe to collect them
if self.entry_probe_required:
program += self._generate_entry_probe()
prefix += self._generate_retprobe_prefix()
# Replace $entry(paramname) with a reference to the
# value we collected when entering the function:
self._replace_entry_exprs()
if self.probe_type == "t":
program += self.tp.generate_struct()
prefix += self.tp.generate_get_struct()
elif self.probe_type == "u":
qualifier = "static inline"
signature = ", int __loc_id"
prefix += self.usdt.generate_usdt_cases()
elif self.probe_type == "p" and len(self.signature) > 0:
# Only entry uprobes/kprobes can have user-specified
# signatures. Other probes force it to ().
signature = ", " + self.signature
program += probe_text.replace("PROBENAME", self.probe_func_name)
program = program.replace("SIGNATURE", signature)
program = program.replace("PID_FILTER",
self._generate_pid_filter())
decl = self._generate_hash_decl()
key_expr = self._generate_key_assignment()
collect = self._generate_hash_update()
program = program.replace("DATA_DECL", decl)
program = program.replace("KEY_EXPR", key_expr)
program = program.replace("FILTER",
"1" if len(self.filter) == 0 else self.filter)
program = program.replace("COLLECT", collect)
program = program.replace("PREFIX", prefix)
program = program.replace("QUALIFIER", qualifier)
if self.probe_type == "u":
self.usdt_thunk_names = []
program += self.usdt.generate_usdt_thunks(
self.probe_func_name, self.usdt_thunk_names)
return program
def _attach_u(self):
libpath = BPF.find_library(self.library)
if libpath is None:
libpath = ProcUtils.which(self.library)
if libpath is None or len(libpath) == 0:
self._bail("unable to find library %s" % self.library)
if self.probe_type == "u":
for i, location in enumerate(self.usdt.locations):
self.bpf.attach_uprobe(name=libpath,
addr=location.address,
fn_name=self.usdt_thunk_names[i],
pid=self.pid or -1)
elif self.probe_type == "r":
self.bpf.attach_uretprobe(name=libpath,
sym=self.function,
fn_name=self.probe_func_name,
pid=self.pid or -1)
else:
self.bpf.attach_uprobe(name=libpath,
sym=self.function,
fn_name=self.probe_func_name,
pid=self.pid or -1)
def _attach_k(self):
if self.probe_type == "r" or self.probe_type == "t":
self.bpf.attach_kretprobe(event=self.function,
fn_name=self.probe_func_name)
else:
self.bpf.attach_kprobe(event=self.function,
fn_name=self.probe_func_name)
def attach(self, bpf):
self.bpf = bpf
if self.is_user:
self._attach_u()
else:
self._attach_k()
if self.entry_probe_required:
self._attach_entry_probe()
def _v2s(self, v):
# Most fields can be converted with plain str(), but strings
# are wrapped in a __string_t which has an .s field
if "__string_t" in type(v).__name__:
return str(v.s)
return str(v)
def _display_expr(self, i):
# Replace ugly latency calculation with $latency
expr = self.exprs[i].replace(
"(bpf_ktime_get_ns() - *____latency_val)", "$latency")
# Replace alias values back with the alias name
for alias, subst in Probe.aliases.items():
expr = expr.replace(subst, alias)
# Replace retval expression with $retval
expr = expr.replace("PT_REGS_RC(ctx)", "$retval")
# Replace ugly (*__param_val) expressions with param name
return re.sub(r"\(\*__(\w+)_val\)", r"\1", expr)
def _display_key(self, key):
if self.is_default_expr:
if not self.probe_type == "r":
return "total calls"
else:
return "retval = %s" % str(key.v0)
else:
# The key object has v0, ..., vk fields containing
# the values of the expressions from self.exprs
def str_i(i):
key_i = self._v2s(getattr(key, "v%d" % i))
return "%s = %s" % \
(self._display_expr(i), key_i)
return ", ".join(map(str_i, range(0, len(self.exprs))))
def display(self, top):
data = self.bpf.get_table(self.probe_hash_name)
if self.type == "freq":
print(self.label or self.raw_spec)
print("\t%-10s %s" % ("COUNT", "EVENT"))
data = sorted(data.items(), key=lambda kv: kv[1].value)
if top is not None:
data = data[-top:]
for key, value in data:
# Print some nice values if the user didn't
# specify an expression to probe
if self.is_default_expr:
if not self.probe_type == "r":
key_str = "total calls"
else:
key_str = "retval = %s" % \
self._v2s(key.v0)
else:
key_str = self._display_key(key)
print("\t%-10s %s" % \
(str(value.value), key_str))
elif self.type == "hist":
label = self.label or (self._display_expr(0)
if not self.is_default_expr else "retval")
data.print_log2_hist(val_type=label)
def __str__(self):
return self.label or self.raw_spec
class Tool(object):
examples = """
Probe specifier syntax:
{p,r,t,u}:{[library],category}:function(signature)[:type[,type...]:expr[,expr...][:filter]][#label]
Where:
p,r,t,u -- probe at function entry, function exit, kernel tracepoint,
or USDT probe
in exit probes: can use $retval, $entry(param), $latency
library -- the library that contains the function
(leave empty for kernel functions)
category -- the category of the kernel tracepoint (e.g. net, sched)
function -- the function name to trace (or tracepoint name)
signature -- the function's parameters, as in the C header
type -- the type of the expression to collect (supports multiple)
expr -- the expression to collect (supports multiple)
filter -- the filter that is applied to collected values
label -- the label for this probe in the resulting output
EXAMPLES:
argdist -H 'p::__kmalloc(u64 size):u64:size'
Print a histogram of allocation sizes passed to kmalloc
argdist -p 1005 -C 'p:c:malloc(size_t size):size_t:size:size==16'
Print a frequency count of how many times process 1005 called malloc
with an allocation size of 16 bytes
argdist -C 'r:c:gets():char*:(char*)$retval#snooped strings'
Snoop on all strings returned by gets()
argdist -H 'r::__kmalloc(size_t size):u64:$latency/$entry(size)#ns per byte'
Print a histogram of nanoseconds per byte from kmalloc allocations
argdist -C 'p::__kmalloc(size_t size, gfp_t flags):size_t:size:flags&GFP_ATOMIC'
Print frequency count of kmalloc allocation sizes that have GFP_ATOMIC
argdist -p 1005 -C 'p:c:write(int fd):int:fd' -T 5
Print frequency counts of how many times writes were issued to a
particular file descriptor number, in process 1005, but only show
the top 5 busiest fds
argdist -p 1005 -H 'r:c:read()'
Print a histogram of results (sizes) returned by read() in process 1005
argdist -C 'r::__vfs_read():u32:$PID:$latency > 100000'
Print frequency of reads by process where the latency was >0.1ms
argdist -H 'r::__vfs_read(void *file, void *buf, size_t count):size_t:$entry(count):$latency > 1000000'
Print a histogram of read sizes that were longer than 1ms
argdist -H \\
'p:c:write(int fd, const void *buf, size_t count):size_t:count:fd==1'
Print a histogram of buffer sizes passed to write() across all
processes, where the file descriptor was 1 (STDOUT)
argdist -C 'p:c:fork()#fork calls'
Count fork() calls in libc across all processes
Can also use funccount.py, which is easier and more flexible
argdist -H 't:block:block_rq_complete():u32:tp.nr_sector'
Print histogram of number of sectors in completing block I/O requests
argdist -C 't:irq:irq_handler_entry():int:tp.irq'
Aggregate interrupts by interrupt request (IRQ)
argdist -C 'u:pthread:pthread_start():u64:arg2' -p 1337
Print frequency of function addresses used as a pthread start function,
relying on the USDT pthread_start probe in process 1337
argdist -H \\
'p:c:sleep(u32 seconds):u32:seconds' \\
'p:c:nanosleep(struct timespec *req):long:req->tv_nsec'
Print histograms of sleep() and nanosleep() parameter values
argdist -p 2780 -z 120 \\
-C 'p:c:write(int fd, char* buf, size_t len):char*:buf:fd==1'
Spy on writes to STDOUT performed by process 2780, up to a string size
of 120 characters
"""
def __init__(self):
parser = argparse.ArgumentParser(description="Trace a " +
"function and display a summary of its parameter values.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=Tool.examples)
parser.add_argument("-p", "--pid", type=int,
help="id of the process to trace (optional)")
parser.add_argument("-z", "--string-size", default=80,
type=int,
help="maximum string size to read from char* arguments")
parser.add_argument("-i", "--interval", default=1, type=int,
help="output interval, in seconds")
parser.add_argument("-n", "--number", type=int, dest="count",
help="number of outputs")
parser.add_argument("-v", "--verbose", action="store_true",
help="print resulting BPF program code before executing")
parser.add_argument("-T", "--top", type=int,
help="number of top results to show (not applicable to " +
"histograms)")
parser.add_argument("-H", "--histogram", nargs="*",
dest="histspecifier", metavar="specifier",
help="probe specifier to capture histogram of " +
"(see examples below)")
parser.add_argument("-C", "--count", nargs="*",
dest="countspecifier", metavar="specifier",
help="probe specifier to capture count of " +
"(see examples below)")
parser.add_argument("-I", "--include", nargs="*",
metavar="header",
help="additional header files to include in the BPF program")
self.args = parser.parse_args()
def _create_probes(self):
self.probes = []
for specifier in (self.args.countspecifier or []):
self.probes.append(Probe(
"freq", specifier, self.args.pid))
for histspecifier in (self.args.histspecifier or []):
self.probes.append(
Probe("hist", histspecifier, self.args.pid))
if len(self.probes) == 0:
print("at least one specifier is required")
exit()
def _generate_program(self):
bpf_source = """
struct __string_t { char s[%d]; };
#include <uapi/linux/ptrace.h>
""" % self.args.string_size
for include in (self.args.include or []):
bpf_source += "#include <%s>\n" % include
bpf_source += BPF.generate_auto_includes(
map(lambda p: p.raw_spec, self.probes))
bpf_source += Tracepoint.generate_decl()
bpf_source += Tracepoint.generate_entry_probe()
for probe in self.probes:
bpf_source += probe.generate_text()
if self.args.verbose:
print(bpf_source)
self.bpf = BPF(text=bpf_source)
def _attach(self):
Tracepoint.attach(self.bpf)
for probe in self.probes:
probe.attach(self.bpf)
if self.args.verbose:
print("open uprobes: %s" % BPF.open_uprobes())
print("open kprobes: %s" % BPF.open_kprobes())
def _main_loop(self):
count_so_far = 0
while True:
try:
sleep(self.args.interval)
except KeyboardInterrupt:
exit()
print("[%s]" % strftime("%H:%M:%S"))
for probe in self.probes:
probe.display(self.args.top)
count_so_far += 1
if self.args.count is not None and \
count_so_far >= self.args.count:
exit()
def _close_probes(self):
for probe in self.probes:
probe.close()
if self.args.verbose:
print("closed probe: " + str(probe))
def run(self):
try:
self._create_probes()
self._generate_program()
self._attach()
self._main_loop()
except:
if self.args.verbose:
traceback.print_exc()
elif sys.exc_info()[0] is not SystemExit:
print(sys.exc_info()[1])
self._close_probes()
if __name__ == "__main__":
Tool().run()
|
|
"""Data model"""
from datetime import datetime, timedelta
from django.db import models
from django.contrib.auth.models import User, Group, Permission
from django.db.models import signals
from django.utils.dateformat import format as dateformat, time_format
__all__ = ['Volunteer', 'Task', 'TaskAssignment', 'TaskStatus', 'TaskType', 'Committee', 'Event']
class Committee(models.Model):
"""A CHIRP committee that a Volunteer belongs to."""
name = models.CharField(max_length=100)
established = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
def get_dues_paid_year_choices():
"""Returns choices for the Dues Paid form field.
Returns tuple of tuples. I.E. ::
(('2007', '2007'), ('2008', '2008'))
"""
years = []
y = int(datetime.now().strftime('%Y'))
while y >= 2008:
years.append((str(y),str(y)))
y = y - 1
return years
class Volunteer(models.Model):
"""A volunteer user"""
user = models.ForeignKey(User, verbose_name="volunteer user",
help_text=("""\
To add a new user go to the Home page and click Add next to Users
in the Auth panel. After creating a new user,
<strong>be sure</strong> you do the following. Under Personal
Details, enter first name, last name, and email address. Under
Permissions, check the box Staff status. Under Groups, add the
new user to the group Volunteer. Then come back to this screen
and select the user."""))
committees = models.ManyToManyField(Committee,
verbose_name="active in these committees",
blank=True,
help_text="""\
Click the plus sign to add a new committee or go to Home > Volunteers >
Committees to edit the name of a committee.""")
emergency_contact_name = models.CharField(max_length=50, blank=True)
emergency_contact_email = models.EmailField(blank=True)
emergency_contact_number = models.CharField(max_length=50, blank=True)
emergency_contact_relationship = models.CharField(max_length=50, blank=True)
dj_shift_day = models.CharField(max_length=10, blank=True, choices=[
('mondays', 'Mondays'),
('tuesdays', 'Tuesdays'),
('wednesdays', 'Wednesdays'),
('thursdays', 'Thursdays'),
('fridays', 'Fridays'),
('saturdays', 'Saturdays'),
('sundays', 'Sundays'),
],
help_text="""\
Select the day of this DJ's shift or leave it blank if the DJ is
not currently active.""")
dj_shift_time_slot = models.CharField(max_length=20, blank=True,
help_text="""\
Enter the time slot (i.e. 9pm - 12am) or leave it blank if the
DJ is not currently active.""")
vol_info_sheet = models.BooleanField("volunteer info sheet on file")
dues_paid_year = models.CharField("dues paid up to", max_length=4, blank=True,
choices=get_dues_paid_year_choices(),
help_text="""\
Select the year for which this volunteer last paid dues. Leave it
blank if the volunteer has not paid any dues yet. If a volunteer does
not have to pay dues, leave this blank and check the Dues Waived box
below.""")
dues_waived = models.BooleanField(
"volunteer does not have to pay dues (waived)")
phone_1 = models.CharField(max_length=20, blank=True)
phone_2 = models.CharField(max_length=20, blank=True)
address_line_1 = models.CharField(max_length=200, blank=True)
address_line_2 = models.CharField(max_length=200, blank=True)
city = models.CharField(max_length=80, default='Chicago', blank=True)
state = models.CharField(max_length=2, default='IL', blank=True)
day_job = models.CharField(max_length=250, blank=True)
availability = models.CharField(max_length=200, blank=True,
help_text="""\
Days and times this volunteer is generally available
(i.e. 'evenings')""")
skills = models.TextField(blank=True,
help_text="""\
Enter any special skills that might be useful to CHIRP""")
has_a_car = models.BooleanField()
has_equipment = models.BooleanField()
can_dj_events = models.BooleanField("can DJ events")
can_fix_stuff = models.BooleanField()
knows_computers = models.BooleanField()
resources = models.TextField("additional resources", blank=True,
help_text="List any additional resources that might useful to CHIRP")
discovered_chirp_by_choices = [
'Friends',
'CHIRP website',
'Another website',
'News article',
'Record Fair',
'WLUW',
'Table at a Festival or Show',
'Other'
]
# re-format so that the value and prompts are the same:
discovered_chirp_by_choices = [
(txt.lower(), txt) for txt in discovered_chirp_by_choices
]
discovered_chirp_by = models.CharField(
max_length=100,
choices=discovered_chirp_by_choices,
blank=True,
verbose_name="Discovered CHIRP by")
discovered_chirp_by_details = models.CharField(
max_length=200, blank=True,
verbose_name="Details",
help_text="""
Name of website, friend, or festival (if applicable)
""")
established = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return "%s (%s %s)" % (self.user.username,
self.user.first_name,
self.user.last_name)
def first_name(self):
"""returns volunteer's first name if available, otherwise username"""
return self.user.first_name or self.user.username
def last_name(self):
"""returns volunteer's last name"""
return self.user.last_name
def email(self):
"""returns volunteer's email address"""
return self.user.email
class Event(models.Model):
"""Describes an event."""
name = models.CharField(max_length=100, verbose_name='event name')
location = models.CharField(max_length=200, verbose_name='event location')
tasks_can_be_claimed = models.BooleanField(default=False,
verbose_name='tasks are ready to be claimed')
start_date = models.DateField(blank=True, null=True,
verbose_name='start date')
DURATION_DAYS_CHOICES = (
(1, '1 day'),
(2, '2 days'),
(3, '3 days'),
(4, '4 days'),
(5, '5 days'),
)
duration_days = models.IntegerField(default=1,
choices=DURATION_DAYS_CHOICES,
verbose_name='event duration')
def __unicode__(self):
if not self.start_date:
return self.name
if self.duration_days == 1:
return '%s (%s)' % (self.name, self.start_date)
return '%s (%d days, starts %s)' % (self.name, self.duration_days,
self.start_date)
def short_name(self, suffix='', maxlen=71):
st = self.name
if (len(st) + len(suffix)) > maxlen:
st = '%s...' % (st[0:maxlen-3])
return u'%s%s' % (st, suffix)
@property
def tasks(self):
tasks = []
if self.tasks_can_be_claimed:
tasks = [t for t in self.task_set.all().order_by("start_time")]
return tasks
class TaskStatus(models.Model):
"""The status of a task performed by a volunteer"""
status = models.CharField(max_length=40)
established = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = verbose_name_plural = 'task status'
def __unicode__(self):
return self.status
class TaskType(models.Model):
"""The type of a task performed by a volunteer"""
short_description = models.CharField(max_length=240)
important_note = models.CharField(
max_length=200, blank=True, null=True,
help_text="""\
Enter an important note about this
task type. For example: "May require heavy lifting." """)
description = models.TextField(blank=True)
established = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.short_description
class Task(models.Model):
"""The task performed by a volunteer.
This task may be assigned multiple times
"""
class Meta:
ordering = ['-established']
for_committee = models.ForeignKey(Committee)
for_event = models.ForeignKey(Event, blank=True, null=True)
task_type = models.ForeignKey(TaskType, verbose_name='type of task',
help_text="""\
Select what type of task this is or click the plus sign to
add a new task type.""")
start_time = models.DateTimeField(blank=True, null=True,
verbose_name='start time')
DURATION_MINUTES_CHOICES = (
(30, '1/2 hour'),
(60, '1 hour'),
(90, '1 1/2 hours'),
(120, '2 hours'),
(150, '2 1/2 hours'),
(180, '3 hours'),
(210, '3 1/2 hours'),
(240, '4 hours'),
(270, '4 1/2 hours'),
(300, '5 hours'),
(330, '5 1/2 hours'),
(360, '6 hours'),
(390, '6 1/2 hours'),
)
duration_minutes = models.IntegerField(blank=True, null=True,
choices=DURATION_MINUTES_CHOICES,
verbose_name='task duration')
num_volunteers_needed = models.PositiveSmallIntegerField(
blank=True, null=True, default=1,
verbose_name='number of volunteers needed')
potential_points = models.DecimalField(
blank=True, null=True, max_digits=5, decimal_places=1,
help_text="""
Potential points that can be earned for this task. Actual points
are set in the task assignment.
""")
description = models.TextField(blank=True,
help_text="""\
A custom description for this task if it's different from the task type.""")
established = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
@property
def claim_task_url(self):
return "/chirp/tasks/claim/%s.json" % self.id
@property
def end_time(self):
if not self.start_time or not self.duration_minutes:
raise ValueError(
"Cannot access self.end_time because this task does "
"not have a start_time or duration value.")
return self.start_time + timedelta(minutes=self.duration_minutes)
@property
def claim_prompt(self):
return (
"You are about to commit to %s." % self.__unicode__())
@property
def claimed_by(self):
# return users assigned to this task (includes completed tasks)
return [asn.volunteer.user for asn in
self.taskassignment_set.filter(
status__in=TaskStatus.objects.filter(
status__in=['Assigned','Completed']))]
def __unicode__(self):
task = self.task_type.__unicode__()
descr = self.description or self.task_type.description
if descr:
task = "%s: %s" % (task, descr)
if self.start_time:
task = "%s on %s from %s - %s" % (
task,
dateformat(self.start_time, "D M jS"),
time_format(self.start_time, "g:i a"),
time_format(self.end_time, "g:i a"))
return task
class TaskAssignment(models.Model):
"""A task assigned to a Volunteer."""
task = models.ForeignKey(Task,
help_text="""\
Select the task or click the plus sign to create a
new one.""")
volunteer = models.ForeignKey(Volunteer, verbose_name="assigned to volunteer",
help_text="""\
Select a volunteer to perform this task or click the plus sign
to add a new one.""")
points = models.DecimalField(
max_digits=5, decimal_places=1,
help_text="points that will be earned for this task")
status = models.ForeignKey(TaskStatus,
default=lambda: TaskStatus.objects.filter(status='Assigned')[0])
established = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return "%s assigned to %s" % (self.task.__unicode__(),
self.volunteer.__unicode__())
class Meeting(models.Model):
meeting_date = models.DateField()
attendees = models.ManyToManyField(User)
established = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
import numpy as np
from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
# Used by SomeRandomBenchmark class below.
_ran_somebenchmark_1 = [False]
_ran_somebenchmark_2 = [False]
_ran_somebenchmark_but_shouldnt = [False]
class SomeRandomBenchmark(test.Benchmark):
"""This Benchmark should automatically be registered in the registry."""
def _dontRunThisBenchmark(self):
_ran_somebenchmark_but_shouldnt[0] = True
def notBenchmarkMethod(self):
_ran_somebenchmark_but_shouldnt[0] = True
def benchmark1(self):
_ran_somebenchmark_1[0] = True
def benchmark2(self):
_ran_somebenchmark_2[0] = True
class TestReportingBenchmark(test.Benchmark):
"""This benchmark (maybe) reports some stuff."""
def benchmarkReport1(self):
self.report_benchmark(iters=1)
def benchmarkReport2(self):
self.report_benchmark(
iters=2,
name="custom_benchmark_name",
extras={"number_key": 3,
"other_key": "string"})
def benchmark_times_an_op(self):
input_size = 5
with session.Session(config=benchmark.benchmark_config()) as sess:
a = array_ops.placeholder(dtype=dtypes.float32, shape=(input_size))
a_plus_a = a + a
return self.run_op_benchmark(
sess,
a_plus_a,
feed_dict={a: np.arange(input_size)},
min_iters=1000,
store_trace=True,
name="op_benchmark")
class BenchmarkTest(test.TestCase):
def testGlobalBenchmarkRegistry(self):
registry = list(benchmark.GLOBAL_BENCHMARK_REGISTRY)
self.assertEqual(len(registry), 2)
self.assertTrue(SomeRandomBenchmark in registry)
self.assertTrue(TestReportingBenchmark in registry)
def testRunSomeRandomBenchmark(self):
# Validate that SomeBenchmark has not run yet
self.assertFalse(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
# Run other benchmarks, but this won't run the one we care about
benchmark._run_benchmarks("unrelated")
# Validate that SomeBenchmark has not run yet
self.assertFalse(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
# Run all the benchmarks, avoid generating any reports
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
benchmark._run_benchmarks("SomeRandom")
# Validate that SomeRandomBenchmark ran correctly
self.assertTrue(_ran_somebenchmark_1[0])
self.assertTrue(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
_ran_somebenchmark_1[0] = False
_ran_somebenchmark_2[0] = False
_ran_somebenchmark_but_shouldnt[0] = False
# Test running a specific method of SomeRandomBenchmark
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
benchmark._run_benchmarks("SomeRandom.*1$")
self.assertTrue(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
@test_util.disable_xla("This test never passed for XLA")
def testReportingBenchmark(self):
tempdir = test.get_temp_dir()
try:
gfile.MakeDirs(tempdir)
except OSError as e:
# It's OK if the directory already exists.
if " exists:" not in str(e):
raise e
prefix = os.path.join(tempdir,
"reporting_bench_%016x_" % random.getrandbits(64))
expected_output_file = "%s%s" % (prefix,
"TestReportingBenchmark.benchmarkReport1")
expected_output_file_2 = "%s%s" % (
prefix, "TestReportingBenchmark.custom_benchmark_name")
expected_output_file_3 = "%s%s" % (prefix,
"TestReportingBenchmark.op_benchmark")
try:
self.assertFalse(gfile.Exists(expected_output_file))
# Run benchmark but without env, shouldn't write anything
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should run without writing anything
self.assertFalse(gfile.Exists(expected_output_file))
# Run benchmark with env, should write
os.environ[benchmark.TEST_REPORTER_TEST_ENV] = prefix
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should write
reporting.benchmarkReport2() # This should write
benchmark_values3 = reporting.benchmark_times_an_op() # This should write
# Check the files were written
self.assertTrue(gfile.Exists(expected_output_file))
self.assertTrue(gfile.Exists(expected_output_file_2))
self.assertTrue(gfile.Exists(expected_output_file_3))
# Check the contents are correct
expected_1 = test_log_pb2.BenchmarkEntry()
expected_1.name = "TestReportingBenchmark.benchmarkReport1"
expected_1.iters = 1
expected_2 = test_log_pb2.BenchmarkEntry()
expected_2.name = "TestReportingBenchmark.custom_benchmark_name"
expected_2.iters = 2
expected_2.extras["number_key"].double_value = 3
expected_2.extras["other_key"].string_value = "string"
expected_3 = test_log_pb2.BenchmarkEntry()
expected_3.name = "TestReportingBenchmark.op_benchmark"
expected_3.iters = 1000
def read_benchmark_entry(f):
s = gfile.GFile(f, "rb").read()
entries = test_log_pb2.BenchmarkEntries.FromString(s)
self.assertEquals(1, len(entries.entry))
return entries.entry[0]
read_benchmark_1 = read_benchmark_entry(expected_output_file)
self.assertProtoEquals(expected_1, read_benchmark_1)
read_benchmark_2 = read_benchmark_entry(expected_output_file_2)
self.assertProtoEquals(expected_2, read_benchmark_2)
read_benchmark_3 = read_benchmark_entry(expected_output_file_3)
self.assertEquals(expected_3.name, read_benchmark_3.name)
self.assertEquals(expected_3.iters, read_benchmark_3.iters)
self.assertGreater(read_benchmark_3.wall_time, 0)
# Trace is not stored in benchmark entry. Instead we get it from
# return value of `run_op_benchmark` call.
full_trace = benchmark_values3["extras"]["full_trace_chrome_format"]
json_trace = json.loads(full_trace)
self.assertTrue(isinstance(json_trace, dict))
self.assertTrue("traceEvents" in json_trace.keys())
allocator_keys = [k for k in read_benchmark_3.extras.keys()
if k.startswith("allocator_maximum_num_bytes_")]
self.assertGreater(len(allocator_keys), 0)
for k in allocator_keys:
self.assertGreater(read_benchmark_3.extras[k].double_value, 0)
finally:
gfile.DeleteRecursively(tempdir)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import re
import shutil
import signal
import socket
import subprocess
import tempfile
import threading
import time
import traceback
import Queue
import uuid
import gear
import yaml
import jenkins_jobs.builder
import jenkins_jobs.formatter
import zmq
import zuul.ansible.library
import zuul.ansible.plugins.callback_plugins
from zuul.lib import commandsocket
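# Timeouts below are in seconds: 5 minutes of watchdog grace, a 2 hour
# default job timeout and a 10 minute post-playbook timeout.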
ANSIBLE_WATCHDOG_GRACE = 5 * 60
ANSIBLE_DEFAULT_TIMEOUT = 2 * 60 * 60
ANSIBLE_DEFAULT_POST_TIMEOUT = 10 * 60
COMMANDS = ['reconfigure', 'stop', 'pause', 'unpause', 'release', 'graceful',
'verbose', 'unverbose']
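# Job parameters such as OFFLINE_NODE_WHEN_COMPLETE arrive as strings
# ('0'/'1'); boolify coerces either strings or real booleans to bool.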
def boolify(x):
if isinstance(x, str):
return bool(int(x))
return bool(x)
class LaunchGearWorker(gear.Worker):
def __init__(self, *args, **kw):
self.__launch_server = kw.pop('launch_server')
super(LaunchGearWorker, self).__init__(*args, **kw)
def handleNoop(self, packet):
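# Delay the NOOP response in proportion to the square of the number of
# active node workers, so that less-loaded launchers tend to pick up
# new jobs first.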
workers = len(self.__launch_server.node_workers)
delay = (workers ** 2) / 1000.0
time.sleep(delay)
return super(LaunchGearWorker, self).handleNoop(packet)
class NodeGearWorker(gear.Worker):
MASS_DO = 101
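# Custom gearman packet type: registers all functions in a single
# packet instead of sending one CAN_DO per function.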
def sendMassDo(self, functions):
names = [gear.convert_to_bytes(x) for x in functions]
data = b'\x00'.join(names)
new_function_dict = {}
for name in names:
new_function_dict[name] = gear.FunctionRecord(name)
self.broadcast_lock.acquire()
try:
p = gear.Packet(gear.constants.REQ, self.MASS_DO, data)
self.broadcast(p)
self.functions = new_function_dict
finally:
self.broadcast_lock.release()
class Watchdog(object):
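# Runs function(*args) from a daemon thread if `timeout` seconds elapse
# before stop() is called.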
def __init__(self, timeout, function, args):
self.timeout = timeout
self.function = function
self.args = args
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
def _run(self):
while self._running and time.time() < self.end:
time.sleep(10)
if self._running:
self.function(*self.args)
def start(self):
self._running = True
self.end = time.time() + self.timeout
self.thread.start()
def stop(self):
self._running = False
class JobDir(object):
def __init__(self, keep=False):
self.keep = keep
self.root = tempfile.mkdtemp()
self.ansible_root = os.path.join(self.root, 'ansible')
os.makedirs(self.ansible_root)
self.known_hosts = os.path.join(self.ansible_root, 'known_hosts')
self.inventory = os.path.join(self.ansible_root, 'inventory')
self.playbook = os.path.join(self.ansible_root, 'playbook')
self.post_playbook = os.path.join(self.ansible_root, 'post_playbook')
self.config = os.path.join(self.ansible_root, 'ansible.cfg')
self.script_root = os.path.join(self.ansible_root, 'scripts')
self.ansible_log = os.path.join(self.ansible_root, 'ansible_log.txt')
os.makedirs(self.script_root)
self.staging_root = os.path.join(self.root, 'staging')
os.makedirs(self.staging_root)
def __enter__(self):
return self
def __exit__(self, etype, value, tb):
if not self.keep:
shutil.rmtree(self.root)
class LaunchServer(object):
log = logging.getLogger("zuul.LaunchServer")
site_section_re = re.compile('site "(.*?)"')
node_section_re = re.compile('node "(.*?)"')
def __init__(self, config, keep_jobdir=False):
self.config = config
self.options = dict(
verbose=False
)
self.keep_jobdir = keep_jobdir
self.hostname = socket.gethostname()
self.registered_functions = set()
self.node_workers = {}
self.jobs = {}
self.builds = {}
self.zmq_send_queue = Queue.Queue()
self.termination_queue = Queue.Queue()
self.sites = {}
self.static_nodes = {}
self.command_map = dict(
reconfigure=self.reconfigure,
stop=self.stop,
pause=self.pause,
unpause=self.unpause,
release=self.release,
graceful=self.graceful,
verbose=self.verboseOn,
unverbose=self.verboseOff,
)
if config.has_option('launcher', 'accept_nodes'):
self.accept_nodes = config.getboolean('launcher',
'accept_nodes')
else:
self.accept_nodes = True
self.config_accept_nodes = self.accept_nodes
if self.config.has_option('zuul', 'state_dir'):
state_dir = os.path.expanduser(
self.config.get('zuul', 'state_dir'))
else:
state_dir = '/var/lib/zuul'
path = os.path.join(state_dir, 'launcher.socket')
self.command_socket = commandsocket.CommandSocket(path)
ansible_dir = os.path.join(state_dir, 'ansible')
plugins_dir = os.path.join(ansible_dir, 'plugins')
self.callback_dir = os.path.join(plugins_dir, 'callback_plugins')
if not os.path.exists(self.callback_dir):
os.makedirs(self.callback_dir)
self.library_dir = os.path.join(ansible_dir, 'library')
if not os.path.exists(self.library_dir):
os.makedirs(self.library_dir)
callback_path = os.path.dirname(os.path.abspath(
zuul.ansible.plugins.callback_plugins.__file__))
for fn in os.listdir(callback_path):
shutil.copy(os.path.join(callback_path, fn), self.callback_dir)
library_path = os.path.dirname(os.path.abspath(
zuul.ansible.library.__file__))
for fn in os.listdir(library_path):
shutil.copy(os.path.join(library_path, fn), self.library_dir)
def get_config_default(section, option, default):
if config.has_option(section, option):
return config.get(section, option)
return default
for section in config.sections():
m = self.site_section_re.match(section)
if m:
sitename = m.group(1)
d = {}
d['host'] = get_config_default(section, 'host', None)
d['user'] = get_config_default(section, 'user', '')
d['pass'] = get_config_default(section, 'pass', '')
d['root'] = get_config_default(section, 'root', '/')
d['keytab'] = get_config_default(section, 'keytab', None)
self.sites[sitename] = d
continue
m = self.node_section_re.match(section)
if m:
nodename = m.group(1)
d = {}
d['name'] = nodename
d['host'] = config.get(section, 'host')
d['description'] = get_config_default(section,
'description', '')
if config.has_option(section, 'labels'):
d['labels'] = config.get(section, 'labels').split(',')
else:
d['labels'] = []
self.static_nodes[nodename] = d
continue
def start(self):
self._gearman_running = True
self._zmq_running = True
self._reaper_running = True
self._command_running = True
# Setup ZMQ
self.zcontext = zmq.Context()
self.zsocket = self.zcontext.socket(zmq.PUB)
self.zsocket.bind("tcp://*:8888")
# Setup Gearman
server = self.config.get('gearman', 'server')
if self.config.has_option('gearman', 'port'):
port = self.config.get('gearman', 'port')
else:
port = 4730
self.worker = LaunchGearWorker('Zuul Launch Server',
launch_server=self)
self.worker.addServer(server, port)
self.log.debug("Waiting for server")
self.worker.waitForServer()
self.log.debug("Registering")
self.register()
# Start command socket
self.log.debug("Starting command processor")
self.command_socket.start()
self.command_thread = threading.Thread(target=self.runCommand)
self.command_thread.daemon = True
self.command_thread.start()
# Load JJB config
self.loadJobs()
# Start ZMQ worker thread
self.log.debug("Starting ZMQ processor")
self.zmq_thread = threading.Thread(target=self.runZMQ)
self.zmq_thread.daemon = True
self.zmq_thread.start()
# Start node worker reaper thread
self.log.debug("Starting reaper")
self.reaper_thread = threading.Thread(target=self.runReaper)
self.reaper_thread.daemon = True
self.reaper_thread.start()
# Start Gearman worker thread
self.log.debug("Starting worker")
self.gearman_thread = threading.Thread(target=self.run)
self.gearman_thread.daemon = True
self.gearman_thread.start()
# Start static workers
for node in self.static_nodes.values():
self.log.debug("Creating static node with arguments: %s" % (node,))
self._launchWorker(node)
def loadJobs(self):
self.log.debug("Loading jobs")
builder = JJB()
path = self.config.get('launcher', 'jenkins_jobs')
builder.load_files([path])
builder.parser.expandYaml()
unseen = set(self.jobs.keys())
for job in builder.parser.jobs:
builder.expandMacros(job)
self.jobs[job['name']] = job
unseen.discard(job['name'])
for name in unseen:
del self.jobs[name]
def register(self):
new_functions = set()
if self.accept_nodes:
new_functions.add("node_assign:zuul")
new_functions.add("stop:%s" % self.hostname)
new_functions.add("set_description:%s" % self.hostname)
new_functions.add("node_revoke:%s" % self.hostname)
for function in new_functions - self.registered_functions:
self.worker.registerFunction(function)
for function in self.registered_functions - new_functions:
self.worker.unRegisterFunction(function)
self.registered_functions = new_functions
def reconfigure(self):
self.log.debug("Reconfiguring")
self.loadJobs()
for node in self.node_workers.values():
try:
if node.isAlive():
node.queue.put(dict(action='reconfigure'))
except Exception:
self.log.exception("Exception sending reconfigure command "
"to worker:")
self.log.debug("Reconfiguration complete")
def pause(self):
self.log.debug("Pausing")
self.accept_nodes = False
self.register()
for node in self.node_workers.values():
try:
if node.isAlive():
node.queue.put(dict(action='pause'))
except Exception:
self.log.exception("Exception sending pause command "
"to worker:")
self.log.debug("Paused")
def unpause(self):
self.log.debug("Unpausing")
self.accept_nodes = self.config_accept_nodes
self.register()
for node in self.node_workers.values():
try:
if node.isAlive():
node.queue.put(dict(action='unpause'))
except Exception:
self.log.exception("Exception sending unpause command "
"to worker:")
self.log.debug("Unpaused")
def release(self):
self.log.debug("Releasing idle nodes")
for node in self.node_workers.values():
if node.name in self.static_nodes:
continue
try:
if node.isAlive():
node.queue.put(dict(action='release'))
except Exception:
self.log.exception("Exception sending release command "
"to worker:")
self.log.debug("Finished releasing idle nodes")
def graceful(self):
# Note: this is run in the command processing thread; no more
# external commands will be processed after this.
self.log.debug("Gracefully stopping")
self.pause()
self.release()
self.log.debug("Waiting for all builds to finish")
while self.builds:
time.sleep(5)
self.log.debug("All builds are finished")
self.stop()
def stop(self):
self.log.debug("Stopping")
# First, stop accepting new jobs
self._gearman_running = False
self._reaper_running = False
self.worker.shutdown()
# Then stop all of the workers
for node in self.node_workers.values():
try:
if node.isAlive():
node.stop()
except Exception:
self.log.exception("Exception sending stop command to worker:")
# Stop ZMQ afterwards so that the send queue is flushed
self._zmq_running = False
self.zmq_send_queue.put(None)
self.zmq_send_queue.join()
# Stop command processing
self._command_running = False
self.command_socket.stop()
# Join the gearman thread which was stopped earlier.
self.gearman_thread.join()
# The command thread is joined in the join() method of this
# class, which is called by the command shell.
self.log.debug("Stopped")
def verboseOn(self):
self.log.debug("Enabling verbose mode")
self.options['verbose'] = True
def verboseOff(self):
self.log.debug("Disabling verbose mode")
self.options['verbose'] = False
def join(self):
self.command_thread.join()
def runCommand(self):
while self._command_running:
try:
command = self.command_socket.get()
self.command_map[command]()
except Exception:
self.log.exception("Exception while processing command")
def runZMQ(self):
while self._zmq_running or not self.zmq_send_queue.empty():
try:
item = self.zmq_send_queue.get()
self.log.debug("Got ZMQ event %s" % (item,))
if item is None:
continue
self.zsocket.send(item)
except Exception:
self.log.exception("Exception while processing ZMQ events")
finally:
self.zmq_send_queue.task_done()
def run(self):
while self._gearman_running:
try:
job = self.worker.getJob()
try:
if job.name.startswith('node_assign:'):
self.log.debug("Got node_assign job: %s" % job.unique)
self.assignNode(job)
elif job.name.startswith('stop:'):
self.log.debug("Got stop job: %s" % job.unique)
self.stopJob(job)
elif job.name.startswith('set_description:'):
self.log.debug("Got set_description job: %s" %
job.unique)
job.sendWorkComplete()
elif job.name.startswith('node_revoke:'):
self.log.debug("Got node_revoke job: %s" % job.unique)
self.revokeNode(job)
else:
self.log.error("Unable to handle job %s" % job.name)
job.sendWorkFail()
except Exception:
self.log.exception("Exception while running job")
job.sendWorkException(traceback.format_exc())
except gear.InterruptedError:
return
except Exception:
self.log.exception("Exception while getting job")
def assignNode(self, job):
args = json.loads(job.arguments)
self.log.debug("Assigned node with arguments: %s" % (args,))
self._launchWorker(args)
data = dict(manager=self.hostname)
job.sendWorkData(json.dumps(data))
job.sendWorkComplete()
def _launchWorker(self, args):
worker = NodeWorker(self.config, self.jobs, self.builds,
self.sites, args['name'], args['host'],
args['description'], args['labels'],
self.hostname, self.zmq_send_queue,
self.termination_queue, self.keep_jobdir,
self.callback_dir, self.library_dir,
self.options)
self.node_workers[worker.name] = worker
worker.thread = threading.Thread(target=worker.run)
worker.thread.start()
def revokeNode(self, job):
try:
args = json.loads(job.arguments)
self.log.debug("Revoke job with arguments: %s" % (args,))
name = args['name']
node = self.node_workers.get(name)
if not node:
self.log.debug("Unable to find worker %s" % (name,))
return
try:
if node.isAlive():
node.queue.put(dict(action='stop'))
else:
self.log.debug("Node %s is not alive while revoking node" %
(node.name,))
except Exception:
self.log.exception("Exception sending stop command "
"to worker:")
finally:
job.sendWorkComplete()
def stopJob(self, job):
try:
args = json.loads(job.arguments)
self.log.debug("Stop job with arguments: %s" % (args,))
unique = args['number']
build_worker_name = self.builds.get(unique)
if not build_worker_name:
self.log.debug("Unable to find build for job %s" % (unique,))
return
node = self.node_workers.get(build_worker_name)
if not node:
self.log.debug("Unable to find worker for job %s" % (unique,))
return
try:
if node.isAlive():
node.queue.put(dict(action='abort'))
else:
self.log.debug("Node %s is not alive while aborting job" %
(node.name,))
except Exception:
self.log.exception("Exception sending abort command "
"to worker:")
finally:
job.sendWorkComplete()
def runReaper(self):
# We don't actually care if all the events are processed
while self._reaper_running:
try:
item = self.termination_queue.get()
self.log.debug("Got termination event %s" % (item,))
if item is None:
continue
worker = self.node_workers[item]
self.log.debug("Joining %s" % (item,))
worker.thread.join()
self.log.debug("Joined %s" % (item,))
del self.node_workers[item]
except Exception:
self.log.exception("Exception while processing "
"termination events:")
finally:
self.termination_queue.task_done()
class NodeWorker(object):
retry_args = dict(register='task_result',
until='task_result.rc == 0',
retries=3,
delay=30)
def __init__(self, config, jobs, builds, sites, name, host,
description, labels, manager_name, zmq_send_queue,
termination_queue, keep_jobdir, callback_dir,
library_dir, options):
self.log = logging.getLogger("zuul.NodeWorker.%s" % (name,))
self.log.debug("Creating node worker %s" % (name,))
self.config = config
self.jobs = jobs
self.builds = builds
self.sites = sites
self.name = name
self.host = host
self.description = description
if not isinstance(labels, list):
labels = [labels]
self.labels = labels
self.thread = None
self.registered_functions = set()
# If the unpaused Event is set, that means we should run jobs.
# If it is clear, then we are paused and should not run jobs.
self.unpaused = threading.Event()
self.unpaused.set()
self._running = True
self.queue = Queue.Queue()
self.manager_name = manager_name
self.zmq_send_queue = zmq_send_queue
self.termination_queue = termination_queue
self.keep_jobdir = keep_jobdir
self.running_job_lock = threading.Lock()
self.pending_registration = False
self.registration_lock = threading.Lock()
self._get_job_lock = threading.Lock()
self._got_job = False
self._job_complete_event = threading.Event()
self._running_job = False
self._aborted_job = False
self._watchdog_timeout = False
self._sent_complete_event = False
self.ansible_job_proc = None
self.ansible_post_proc = None
self.workspace_root = config.get('launcher', 'workspace_root')
if self.config.has_option('launcher', 'private_key_file'):
self.private_key_file = config.get('launcher', 'private_key_file')
else:
self.private_key_file = '~/.ssh/id_rsa'
if self.config.has_option('launcher', 'username'):
self.username = config.get('launcher', 'username')
else:
self.username = 'zuul'
self.callback_dir = callback_dir
self.library_dir = library_dir
self.options = options
def isAlive(self):
# Meant to be called from the manager
if self.thread and self.thread.is_alive():
return True
return False
def run(self):
self.log.debug("Node worker %s starting" % (self.name,))
server = self.config.get('gearman', 'server')
if self.config.has_option('gearman', 'port'):
port = self.config.get('gearman', 'port')
else:
port = 4730
self.worker = NodeGearWorker(self.name)
self.worker.addServer(server, port)
self.log.debug("Waiting for server")
self.worker.waitForServer()
self.log.debug("Registering")
self.register()
self.gearman_thread = threading.Thread(target=self.runGearman)
self.gearman_thread.daemon = True
self.gearman_thread.start()
self.log.debug("Started")
while self._running or not self.queue.empty():
try:
self._runQueue()
except Exception:
self.log.exception("Exception in queue manager:")
def stop(self):
# If this is called locally, setting _running here is
# effective; if it's called remotely it is not, but the flag
# will be set by the queue thread.
self.log.debug("Submitting stop request")
self._running = False
self.unpaused.set()
self.queue.put(dict(action='stop'))
self.queue.join()
def pause(self):
self.unpaused.clear()
self.worker.stopWaitingForJobs()
def unpause(self):
self.unpaused.set()
def release(self):
# If this node is idle, stop it.
old_unpaused = self.unpaused.is_set()
if old_unpaused:
self.pause()
with self._get_job_lock:
if self._got_job:
self.log.debug("This worker is not idle")
if old_unpaused:
self.unpause()
return
self.log.debug("Stopping due to release command")
self.queue.put(dict(action='stop'))
def _runQueue(self):
item = self.queue.get()
try:
if item['action'] == 'stop':
self.log.debug("Received stop request")
self._running = False
self.termination_queue.put(self.name)
if not self.abortRunningJob():
self.sendFakeCompleteEvent()
else:
self._job_complete_event.wait()
self.worker.shutdown()
if item['action'] == 'pause':
self.log.debug("Received pause request")
self.pause()
if item['action'] == 'unpause':
self.log.debug("Received unpause request")
self.unpause()
if item['action'] == 'release':
self.log.debug("Received release request")
self.release()
elif item['action'] == 'reconfigure':
self.log.debug("Received reconfigure request")
self.register()
elif item['action'] == 'abort':
self.log.debug("Received abort request")
self.abortRunningJob()
finally:
self.queue.task_done()
def runGearman(self):
while self._running:
try:
self.unpaused.wait()
if self._running:
self._runGearman()
except Exception:
self.log.exception("Exception in gearman manager:")
with self._get_job_lock:
self._got_job = False
def _runGearman(self):
if self.pending_registration:
self.register()
with self._get_job_lock:
try:
job = self.worker.getJob()
self._got_job = True
except gear.InterruptedError:
return
self.log.debug("Node worker %s got job %s" % (self.name, job.name))
try:
if job.name not in self.registered_functions:
self.log.error("Unable to handle job %s" % job.name)
job.sendWorkFail()
return
self.launch(job)
except Exception:
self.log.exception("Exception while running job")
job.sendWorkException(traceback.format_exc())
def generateFunctionNames(self, job):
# This only supports "node: foo" and "node: foo || bar"
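# For example, a job {'name': 'gate-foo', 'node': 'precise || trusty'}
# on a worker whose labels include 'precise' registers
# 'build:gate-foo' and 'build:gate-foo:precise'.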
ret = set()
job_labels = job.get('node')
matching_labels = set()
if job_labels:
job_labels = [x.strip() for x in job_labels.split('||')]
matching_labels = set(self.labels) & set(job_labels)
if not matching_labels:
return ret
ret.add('build:%s' % (job['name'],))
for label in matching_labels:
ret.add('build:%s:%s' % (job['name'], label))
return ret
def register(self):
if not self.registration_lock.acquire(False):
self.log.debug("Registration already in progress")
return
try:
if self._running_job:
self.pending_registration = True
self.log.debug("Ignoring registration due to running job")
return
self.log.debug("Updating registration")
self.pending_registration = False
new_functions = set()
for job in self.jobs.values():
new_functions |= self.generateFunctionNames(job)
self.worker.sendMassDo(new_functions)
self.registered_functions = new_functions
finally:
self.registration_lock.release()
def abortRunningJob(self):
self._aborted_job = True
return self.abortRunningProc(self.ansible_job_proc)
def abortRunningProc(self, proc):
aborted = False
self.log.debug("Abort: acquiring job lock")
with self.running_job_lock:
if self._running_job:
self.log.debug("Abort: a job is running")
if proc:
self.log.debug("Abort: sending kill signal to job "
"process group")
try:
pgid = os.getpgid(proc.pid)
os.killpg(pgid, signal.SIGKILL)
aborted = True
except Exception:
self.log.exception("Exception while killing "
"ansible process:")
else:
self.log.debug("Abort: no job is running")
return aborted
def launch(self, job):
self.log.info("Node worker %s launching job %s" %
(self.name, job.name))
# Make sure we can parse what we need from the job first
args = json.loads(job.arguments)
offline = boolify(args.get('OFFLINE_NODE_WHEN_COMPLETE', False))
job_name = job.name.split(':')[1]
# Initialize the result so we have something regardless of
# whether the job actually runs
result = None
self._sent_complete_event = False
self._aborted_job = False
self._watchdog_timeout = False
try:
self.sendStartEvent(job_name, args)
except Exception:
self.log.exception("Exception while sending job start event")
try:
result = self.runJob(job, args)
except Exception:
self.log.exception("Exception while launching job thread")
self._running_job = False
try:
data = json.dumps(dict(result=result))
job.sendWorkComplete(data)
except Exception:
self.log.exception("Exception while sending job completion packet")
try:
self.sendCompleteEvent(job_name, result, args)
except Exception:
self.log.exception("Exception while sending job completion event")
try:
del self.builds[job.unique]
except Exception:
self.log.exception("Exception while clearing build record")
self._job_complete_event.set()
if offline and self._running:
self.stop()
def sendStartEvent(self, name, parameters):
build = dict(node_name=self.name,
host_name=self.manager_name,
parameters=parameters)
event = dict(name=name,
build=build)
item = "onStarted %s" % json.dumps(event)
self.log.debug("Sending over ZMQ: %s" % (item,))
self.zmq_send_queue.put(item)
def sendCompleteEvent(self, name, status, parameters):
build = dict(status=status,
node_name=self.name,
host_name=self.manager_name,
parameters=parameters)
event = dict(name=name,
build=build)
item = "onFinalized %s" % json.dumps(event)
self.log.debug("Sending over ZMQ: %s" % (item,))
self.zmq_send_queue.put(item)
self._sent_complete_event = True
def sendFakeCompleteEvent(self):
if self._sent_complete_event:
return
self.sendCompleteEvent('zuul:launcher-shutdown',
'SUCCESS', {})
def runJob(self, job, args):
self.ansible_job_proc = None
self.ansible_post_proc = None
result = None
with self.running_job_lock:
if not self._running:
return result
self._running_job = True
self._job_complete_event.clear()
self.log.debug("Job %s: beginning" % (job.unique,))
self.builds[job.unique] = self.name
with JobDir(self.keep_jobdir) as jobdir:
self.log.debug("Job %s: job root at %s" %
(job.unique, jobdir.root))
timeout = self.prepareAnsibleFiles(jobdir, job, args)
data = {
'manager': self.manager_name,
'number': job.unique,
}
if ':' in self.host:
data['url'] = 'telnet://[%s]:19885' % self.host
else:
data['url'] = 'telnet://%s:19885' % self.host
job.sendWorkData(json.dumps(data))
job.sendWorkStatus(0, 100)
job_status = self.runAnsiblePlaybook(jobdir, timeout)
if job_status is None:
# The result of the job is indeterminate. Zuul will
# run it again.
return result
post_status = self.runAnsiblePostPlaybook(jobdir, job_status)
if not post_status:
result = 'POST_FAILURE'
elif job_status:
result = 'SUCCESS'
else:
result = 'FAILURE'
if self._aborted_job and not self._watchdog_timeout:
# A None result will cause zuul to relaunch the job if
# it needs to.
result = None
return result
def getHostList(self):
return [('node', dict(
ansible_host=self.host, ansible_user=self.username))]
def _substituteVariables(self, text, variables):
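# Expands $NAME references using the supplied parameters; unknown
# names expand to an empty string. E.g. '$WORKSPACE/logs' with
# {'WORKSPACE': '/w'} becomes '/w/logs'.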
def lookup(match):
return variables.get(match.group(1), '')
return re.sub(r'\$([A-Za-z0-9_]+)', lookup, text)
def _getRsyncOptions(self, source, parameters):
# Treat the publisher source as a filter; ant and rsync behave
# fairly close in this manner, except for leading directories.
source = self._substituteVariables(source, parameters)
# If the source starts with ** then we want to match any
# number of directories, so don't anchor the include filter.
# If it does not start with **, then the intent is likely to
# at least start by matching an immediate file or subdirectory
# (even if later we have a ** in the middle), so in this case,
# anchor it to the root of the transfer (the workspace).
if not source.startswith('**'):
source = os.path.join('/', source)
# These options mean: include the thing we want, include any
# directories (so that we continue to search for the thing we
# want no matter how deep it is), exclude anything that
# doesn't match the thing we want or is a directory, then get
# rid of empty directories left over at the end.
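# For example, a source of 'logs/**' (anchored at the workspace root)
# yields: --include="/logs/**" --include="*/" --exclude="*"
# --prune-empty-dirs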
rsync_opts = ['--include="%s"' % source,
'--include="*/"',
'--exclude="*"',
'--prune-empty-dirs']
return rsync_opts
def _makeSCPTask(self, jobdir, publisher, parameters):
tasks = []
for scpfile in publisher['scp']['files']:
scproot = tempfile.mkdtemp(dir=jobdir.staging_root)
os.chmod(scproot, 0o755)
site = publisher['scp']['site']
if scpfile.get('copy-console'):
# Include the local ansible directory in the console
# upload. This uploads the playbook and ansible logs.
copyargs = dict(src=jobdir.ansible_root + '/',
dest=os.path.join(scproot, '_zuul_ansible'))
task = dict(copy=copyargs,
delegate_to='127.0.0.1')
# This is a local copy and should not fail, so does
# not need a retry stanza.
tasks.append(task)
# Fetch the console log from the remote host.
src = '/tmp/console.html'
rsync_opts = []
else:
src = parameters['WORKSPACE']
if not src.endswith('/'):
src = src + '/'
rsync_opts = self._getRsyncOptions(scpfile['source'],
parameters)
syncargs = dict(src=src,
dest=scproot,
copy_links='yes',
mode='pull')
if rsync_opts:
syncargs['rsync_opts'] = rsync_opts
task = dict(synchronize=syncargs)
if not scpfile.get('copy-after-failure'):
task['when'] = 'success'
task.update(self.retry_args)
tasks.append(task)
task = self._makeSCPTaskLocalAction(
site, scpfile, scproot, parameters)
task.update(self.retry_args)
tasks.append(task)
return tasks
def _makeSCPTaskLocalAction(self, site, scpfile, scproot, parameters):
if site not in self.sites:
raise Exception("Undefined SCP site: %s" % (site,))
site = self.sites[site]
dest = scpfile['target'].lstrip('/')
dest = self._substituteVariables(dest, parameters)
dest = os.path.join(site['root'], dest)
dest = os.path.normpath(dest)
if not dest.startswith(site['root']):
raise Exception("Target path %s is not below site root" %
(dest,))
rsync_cmd = [
'/usr/bin/rsync', '--delay-updates', '-F',
'--compress', '-rt', '--safe-links',
'--rsync-path="mkdir -p {dest} && rsync"',
'--rsh="/usr/bin/ssh -i {private_key_file} -S none '
'-o StrictHostKeyChecking=no -q"',
'--out-format="<<CHANGED>>%i %n%L"',
'{source}', '"{user}@{host}:{dest}"'
]
if scpfile.get('keep-hierarchy'):
source = '"%s/"' % scproot
else:
source = '`/usr/bin/find "%s" -type f`' % scproot
shellargs = ' '.join(rsync_cmd).format(
source=source,
dest=dest,
private_key_file=self.private_key_file,
host=site['host'],
user=site['user'])
task = dict(shell=shellargs,
delegate_to='127.0.0.1')
if not scpfile.get('copy-after-failure'):
task['when'] = 'success'
return task
def _makeFTPTask(self, jobdir, publisher, parameters):
tasks = []
ftp = publisher['ftp']
site = ftp['site']
if site not in self.sites:
raise Exception("Undefined FTP site: %s" % site)
site = self.sites[site]
ftproot = tempfile.mkdtemp(dir=jobdir.staging_root)
ftpcontent = os.path.join(ftproot, 'content')
os.makedirs(ftpcontent)
ftpscript = os.path.join(ftproot, 'script')
src = parameters['WORKSPACE']
if not src.endswith('/'):
src = src + '/'
rsync_opts = self._getRsyncOptions(ftp['source'],
parameters)
syncargs = dict(src=src,
dest=ftpcontent,
copy_links='yes',
mode='pull')
if rsync_opts:
syncargs['rsync_opts'] = rsync_opts
task = dict(synchronize=syncargs,
when='success')
task.update(self.retry_args)
tasks.append(task)
task = dict(shell='lftp -f %s' % ftpscript,
when='success',
delegate_to='127.0.0.1')
ftpsource = ftpcontent
if ftp.get('remove-prefix'):
ftpsource = os.path.join(ftpcontent, ftp['remove-prefix'])
while ftpsource[-1] == '/':
ftpsource = ftpsource[:-1]
ftptarget = ftp['target'].lstrip('/')
ftptarget = self._substituteVariables(ftptarget, parameters)
ftptarget = os.path.join(site['root'], ftptarget)
ftptarget = os.path.normpath(ftptarget)
if not ftptarget.startswith(site['root']):
raise Exception("Target path %s is not below site root" %
(ftptarget,))
while ftptarget[-1] == '/':
ftptarget = ftptarget[:-1]
with open(ftpscript, 'w') as script:
script.write('open %s\n' % site['host'])
script.write('user %s %s\n' % (site['user'], site['pass']))
script.write('mirror -R %s %s\n' % (ftpsource, ftptarget))
task.update(self.retry_args)
tasks.append(task)
return tasks
def _makeAFSTask(self, jobdir, publisher, parameters):
tasks = []
afs = publisher['afs']
site = afs['site']
if site not in self.sites:
raise Exception("Undefined AFS site: %s" % site)
site = self.sites[site]
# It is possible that this could be done in one rsync step,
# however, the current rsync from the host is complicated (so
# that we can match the behavior of ant), and then rsync to
# afs is complicated and involves a pre-processing step in
# both locations (so that we can exclude directories). Each
# is well understood individually so it is easier to compose
# them in series than combine them together. A better,
# longer-lived solution (with better testing) would do just
# that.
afsroot = tempfile.mkdtemp(dir=jobdir.staging_root)
afscontent = os.path.join(afsroot, 'content')
src = parameters['WORKSPACE']
if not src.endswith('/'):
src = src + '/'
rsync_opts = self._getRsyncOptions(afs['source'],
parameters)
syncargs = dict(src=src,
dest=afscontent,
copy_links='yes',
mode='pull')
if rsync_opts:
syncargs['rsync_opts'] = rsync_opts
task = dict(synchronize=syncargs,
when='success')
task.update(self.retry_args)
tasks.append(task)
afstarget = afs['target']
afstarget = self._substituteVariables(afstarget, parameters)
afstarget = os.path.join(site['root'], afstarget)
afstarget = os.path.normpath(afstarget)
if not afstarget.startswith(site['root']):
raise Exception("Target path %s is not below site root" %
(afstarget,))
src_markers_file = os.path.join(afsroot, 'src-markers')
dst_markers_file = os.path.join(afsroot, 'dst-markers')
exclude_file = os.path.join(afsroot, 'exclude')
filter_file = os.path.join(afsroot, 'filter')
find_pipe = [
"/usr/bin/find {path} -name .root-marker -printf '%P\n'",
"/usr/bin/xargs -I{{}} dirname {{}}",
"/usr/bin/sort > {file}"]
find_pipe = ' | '.join(find_pipe)
# Find the list of root markers in the just-completed build
# (usually there will only be one, but some builds produce
# content at the root *and* at a tag location).
task = dict(shell=find_pipe.format(path=afscontent,
file=src_markers_file),
when='success',
delegate_to='127.0.0.1')
tasks.append(task)
# Find the list of root markers that already exist in the
# published site.
task = dict(shell=find_pipe.format(path=afstarget,
file=dst_markers_file),
when='success',
delegate_to='127.0.0.1')
tasks.append(task)
# Create a file that contains the set of directories with root
# markers in the published site that do not have root markers
# in the built site.
exclude_command = "/usr/bin/comm -23 {dst} {src} > {exclude}".format(
src=src_markers_file,
dst=dst_markers_file,
exclude=exclude_file)
task = dict(shell=exclude_command,
when='success',
delegate_to='127.0.0.1')
tasks.append(task)
# Create a filter list for rsync so that we copy exactly the
# directories we want to without deleting any existing
# directories in the published site that were placed there by
# previous builds.
# The first group of items in the filter list are the
# directories in the current build with root markers, except
# for the root of the build. This is so that if, later, the
# build root ends up as an exclude, we still copy the
# directories in this build underneath it (since these
# includes will have matched first). We can't include the
# build root itself here, even if we do want to synchronize
# it, since that would defeat later excludes. In other words,
# if the build produces a root marker in "/subdir" but not in
# "/", this section is needed so that "/subdir" is copied at
# all, since "/" will be excluded later.
command = ("/bin/grep -v '^/$' {src} | "
"/bin/sed -e 's/^+ /' > {filter}".format(
src=src_markers_file,
filter=filter_file))
task = dict(shell=command,
when='success',
delegate_to='127.0.0.1')
tasks.append(task)
# The second group is the set of directories that are in the
# published site but not in the built site. This is so that
# if the built site does contain a marker at root (meaning
# that there is content that should be copied into the root)
# that we don't delete everything else previously built
# underneath the root.
command = ("/bin/grep -v '^/$' {exclude} | "
"/bin/sed -e 's/^- /' >> {filter}".format(
exclude=exclude_file,
filter=filter_file))
task = dict(shell=command,
when='success',
delegate_to='127.0.0.1')
tasks.append(task)
# The last entry in the filter file is for the build root. If
# there is no marker in the build root, then we need to
# exclude it from the rsync, so we add it here. It needs to
# be in the form of '/*' so that it matches all of the files
# in the build root. If there is no marker at the build root,
# then we should omit the '/*' exclusion so that it is
# implicitly included.
command = "grep '^/$' {exclude} && echo '- /*' >> {filter}".format(
exclude=exclude_file,
filter=filter_file)
task = dict(shell=command,
when='success',
delegate_to='127.0.0.1')
tasks.append(task)
# Perform the rsync with the filter list.
rsync_cmd = [
'/usr/bin/k5start', '-t', '-k', '{keytab}', '--',
'/usr/bin/rsync', '-rtp', '--safe-links', '--delete-after',
"--filter='merge {filter}'", '{src}/', '{dst}/',
]
shellargs = ' '.join(rsync_cmd).format(
src=afscontent,
dst=afstarget,
filter=filter_file,
keytab=site['keytab'])
task = dict(shell=shellargs,
when='success',
delegate_to='127.0.0.1')
tasks.append(task)
return tasks
def _makeBuilderTask(self, jobdir, builder, parameters):
tasks = []
script_fn = '%s.sh' % str(uuid.uuid4().hex)
script_path = os.path.join(jobdir.script_root, script_fn)
with open(script_path, 'w') as script:
data = builder['shell']
if not data.startswith('#!'):
data = '#!/bin/bash -x\n %s' % (data,)
script.write(data)
remote_path = os.path.join('/tmp', script_fn)
copy = dict(src=script_path,
dest=remote_path,
mode=0o555)
task = dict(copy=copy)
tasks.append(task)
runner = dict(command=remote_path,
cwd=parameters['WORKSPACE'],
parameters=parameters)
task = dict(zuul_runner=runner)
task['name'] = ('zuul_runner with {{ timeout | int - elapsed_time }} '
'second timeout')
task['when'] = '{{ elapsed_time < timeout | int }}'
task['async'] = '{{ timeout | int - elapsed_time }}'
task['poll'] = 5
tasks.append(task)
filetask = dict(path=remote_path,
state='absent')
task = dict(file=filetask)
tasks.append(task)
return tasks
def _transformPublishers(self, jjb_job):
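# Reorders publishers so that any scp publisher copying the console
# log runs last (as a "late" publisher); the post playbook later puts
# late publishers in the 'always' block so the console is uploaded
# even if earlier publishers fail.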
early_publishers = []
late_publishers = []
old_publishers = jjb_job.get('publishers', [])
for publisher in old_publishers:
early_scpfiles = []
late_scpfiles = []
if 'scp' not in publisher:
early_publishers.append(publisher)
continue
copy_console = False
for scpfile in publisher['scp']['files']:
if scpfile.get('copy-console'):
scpfile['keep-hierarchy'] = True
late_scpfiles.append(scpfile)
copy_console = True
else:
early_scpfiles.append(scpfile)
publisher['scp']['files'] = early_scpfiles + late_scpfiles
if copy_console:
late_publishers.append(publisher)
else:
early_publishers.append(publisher)
publishers = early_publishers + late_publishers
if old_publishers != publishers:
self.log.debug("Transformed job publishers")
return early_publishers, late_publishers
def prepareAnsibleFiles(self, jobdir, gearman_job, args):
job_name = gearman_job.name.split(':')[1]
jjb_job = self.jobs[job_name]
parameters = args.copy()
parameters['WORKSPACE'] = os.path.join(self.workspace_root, job_name)
with open(jobdir.inventory, 'w') as inventory:
for host_name, host_vars in self.getHostList():
inventory.write(host_name)
for k, v in host_vars.items():
inventory.write(' %s=%s' % (k, v))
inventory.write('\n')
timeout = None
timeout_var = None
for wrapper in jjb_job.get('wrappers', []):
if isinstance(wrapper, dict):
build_timeout = wrapper.get('timeout')
if isinstance(build_timeout, dict):
timeout_var = build_timeout.get('timeout-var')
timeout = build_timeout.get('timeout')
if timeout is not None:
timeout = int(timeout) * 60
if not timeout:
timeout = ANSIBLE_DEFAULT_TIMEOUT
if timeout_var:
parameters[timeout_var] = str(timeout * 1000)
with open(jobdir.playbook, 'w') as playbook:
pre_tasks = []
tasks = []
main_block = []
error_block = []
variables = []
shellargs = "ssh-keyscan {{ ansible_host }} > %s" % (
jobdir.known_hosts)
pre_tasks.append(dict(shell=shellargs,
delegate_to='127.0.0.1'))
tasks.append(dict(block=main_block,
rescue=error_block))
task = dict(file=dict(path='/tmp/console.html', state='absent'))
main_block.append(task)
task = dict(zuul_console=dict(path='/tmp/console.html',
port=19885))
main_block.append(task)
task = dict(file=dict(path=parameters['WORKSPACE'],
state='directory'))
main_block.append(task)
msg = [
"Launched by %s" % self.manager_name,
"Building remotely on %s in workspace %s" % (
self.name, parameters['WORKSPACE'])]
task = dict(zuul_log=dict(msg=msg))
main_block.append(task)
for builder in jjb_job.get('builders', []):
if 'shell' in builder:
main_block.extend(
self._makeBuilderTask(jobdir, builder, parameters))
task = dict(zuul_log=dict(msg="Job complete, result: SUCCESS"))
main_block.append(task)
task = dict(zuul_log=dict(msg="Job complete, result: FAILURE"))
error_block.append(task)
error_block.append(dict(fail=dict(msg='FAILURE')))
variables.append(dict(timeout=timeout))
play = dict(hosts='node', name='Job body', vars=variables,
pre_tasks=pre_tasks, tasks=tasks)
playbook.write(yaml.safe_dump([play], default_flow_style=False))
early_publishers, late_publishers = self._transformPublishers(jjb_job)
with open(jobdir.post_playbook, 'w') as playbook:
blocks = []
for publishers in [early_publishers, late_publishers]:
block = []
for publisher in publishers:
if 'scp' in publisher:
block.extend(self._makeSCPTask(jobdir, publisher,
parameters))
if 'ftp' in publisher:
block.extend(self._makeFTPTask(jobdir, publisher,
parameters))
if 'afs' in publisher:
block.extend(self._makeAFSTask(jobdir, publisher,
parameters))
blocks.append(block)
# The 'always' section contains the log publishing tasks,
# the 'block' contains all the other publishers. This way
# we run the log publisher regardless of whether the rest
# of the publishers succeed.
tasks = []
tasks.append(dict(block=blocks[0],
always=blocks[1]))
play = dict(hosts='node', name='Publishers',
tasks=tasks)
playbook.write(yaml.safe_dump([play], default_flow_style=False))
with open(jobdir.config, 'w') as config:
config.write('[defaults]\n')
config.write('hostfile = %s\n' % jobdir.inventory)
config.write('keep_remote_files = True\n')
config.write('local_tmp = %s/.ansible/local_tmp\n' % jobdir.root)
config.write('remote_tmp = %s/.ansible/remote_tmp\n' % jobdir.root)
config.write('private_key_file = %s\n' % self.private_key_file)
config.write('retry_files_enabled = False\n')
config.write('log_path = %s\n' % jobdir.ansible_log)
config.write('gathering = explicit\n')
config.write('callback_plugins = %s\n' % self.callback_dir)
config.write('library = %s\n' % self.library_dir)
# bump the timeout because busy nodes may take more than
# 10s to respond
config.write('timeout = 30\n')
config.write('[ssh_connection]\n')
ssh_args = "-o ControlMaster=auto -o ControlPersist=60s " \
"-o UserKnownHostsFile=%s" % jobdir.known_hosts
config.write('ssh_args = %s\n' % ssh_args)
return timeout
def _ansibleTimeout(self, proc, msg):
self._watchdog_timeout = True
self.log.warning(msg)
self.abortRunningProc(proc)
def runAnsiblePlaybook(self, jobdir, timeout):
# Set LOGNAME env variable so Ansible log_path log reports
# the correct user.
env_copy = os.environ.copy()
env_copy['LOGNAME'] = 'zuul'
if self.options['verbose']:
verbose = '-vvv'
else:
verbose = '-v'
cmd = ['ansible-playbook', jobdir.playbook, verbose]
self.log.debug("Ansible command: %s" % (cmd,))
self.ansible_job_proc = subprocess.Popen(
cmd,
cwd=jobdir.ansible_root,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
preexec_fn=os.setsid,
env=env_copy,
)
ret = None
watchdog = Watchdog(timeout + ANSIBLE_WATCHDOG_GRACE,
self._ansibleTimeout,
(self.ansible_job_proc,
"Ansible timeout exceeded"))
watchdog.start()
try:
for line in iter(self.ansible_job_proc.stdout.readline, b''):
line = line[:1024].rstrip()
self.log.debug("Ansible output: %s" % (line,))
ret = self.ansible_job_proc.wait()
finally:
watchdog.stop()
self.log.debug("Ansible exit code: %s" % (ret,))
self.ansible_job_proc = None
if self._watchdog_timeout:
return False
if ret == 3:
# AnsibleHostUnreachable: We had a network issue connecting to
# our zuul-worker.
return None
elif ret == -9:
# Received abort request.
return None
return ret == 0
def runAnsiblePostPlaybook(self, jobdir, success):
# Set LOGNAME env variable so Ansible log_path log reports
# the correct user.
env_copy = os.environ.copy()
env_copy['LOGNAME'] = 'zuul'
if self.options['verbose']:
verbose = '-vvv'
else:
verbose = '-v'
cmd = ['ansible-playbook', jobdir.post_playbook,
'-e', 'success=%s' % success, verbose]
self.log.debug("Ansible post command: %s" % (cmd,))
self.ansible_post_proc = subprocess.Popen(
cmd,
cwd=jobdir.ansible_root,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
preexec_fn=os.setsid,
env=env_copy,
)
ret = None
watchdog = Watchdog(ANSIBLE_DEFAULT_POST_TIMEOUT,
self._ansibleTimeout,
(self.ansible_post_proc,
"Ansible post timeout exceeded"))
watchdog.start()
try:
for line in iter(self.ansible_post_proc.stdout.readline, b''):
line = line[:1024].rstrip()
self.log.debug("Ansible post output: %s" % (line,))
ret = self.ansible_post_proc.wait()
finally:
watchdog.stop()
self.log.debug("Ansible post exit code: %s" % (ret,))
self.ansible_post_proc = None
return ret == 0
class JJB(jenkins_jobs.builder.Builder):
def __init__(self):
self.global_config = None
self._plugins_list = []
def expandComponent(self, component_type, component, template_data):
component_list_type = component_type + 's'
new_components = []
if isinstance(component, dict):
name, component_data = next(iter(component.items()))
if template_data:
component_data = jenkins_jobs.formatter.deep_format(
component_data, template_data, True)
else:
name = component
component_data = {}
new_component = self.parser.data.get(component_type, {}).get(name)
if new_component:
for new_sub_component in new_component[component_list_type]:
new_components.extend(
self.expandComponent(component_type,
new_sub_component, component_data))
else:
new_components.append({name: component_data})
return new_components
def expandMacros(self, job):
for component_type in ['builder', 'publisher', 'wrapper']:
component_list_type = component_type + 's'
new_components = []
for new_component in job.get(component_list_type, []):
new_components.extend(self.expandComponent(component_type,
new_component, {}))
job[component_list_type] = new_components
|
|
import re
from collections import OrderedDict
from parsimonious.grammar import Grammar
from parsimonious.exceptions import IncompleteParseError
from .errors import IndentError, LyError, LySyntaxError, UnsupportedCommaNesting
from .globals import BLK_OPEN, BLK_CLOSE, INDENT_SIZE, COMMENT_OPEN, COMMENT_CLOSE
from .ly_types import RuleBlock, UnpackMe, RootBlock, IgnoreMe, ParentReference
from .vendor import vendorize_css, vendorize_tree
ly_grammar = ""
funcmap = {}
defer_children_eval = []
### GRAMMAR HANDLING ###
class GDef(object):
'''
Decorator for defining LightYear syntax.
'''
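# Hypothetical usage: the rule text both extends ly_grammar and maps
# each rule name to the decorated handler, e.g.
#   @GDef('hexcolor = ~"#[0-9a-fA-F]{3,6}"')
#   def hexcolor(env, node, children): ...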
def __init__(self, ruletxt, defer=False):
global ly_grammar
ly_grammar += ruletxt + '\n'
self.rulenames = []
for line in ruletxt.split('\n'):
line = line.strip()
if line:
name = line.split('=')[0].strip()
self.rulenames.append(name)
if defer:
defer_children_eval.append(name)
def __call__(self, f):
for name in self.rulenames:
funcmap[name] = f
### LIGHTYEAR PARSER ###
class LY(object):
'''
Parses LightYear code and generates CSS as output.
'''
grammar = None
def __init__(self, env=None, debug=False, path=None, vendorize=False, vendor_targets=None):
if not self.grammar:
self.__class__.grammar = Grammar(ly_grammar)['ltree']
self.env = env or {}
self.debug = debug
self.path = path
self.vendorize = vendorize
self.vendor_targets = vendor_targets
def eval(self, ly_code):
'''
Accept a string containing LightYear code as input, and recursively
evaluate the root node.
'''
lines = ly_code.split('\n')
lines = tokenize_whitespace(lines)
lines = tokenize_comments(lines)
ly_code = '\n'.join(lines)
self.debug = DebugGenerator(ly_code) if self.debug else False
try:
node = self.grammar.parse(ly_code)
except IncompleteParseError as e:
raise LySyntaxError(e.pos, ly_code)
self.ltree = self._evalnode(node)
self.flatten()
if self.vendorize == 'offline':
vendorize_tree(self.ltree, offline=True, targets=self.vendor_targets)
if self.vendorize == 'online':
vendorize_tree(self.ltree, offline=False, targets=self.vendor_targets)
def _evalnode(self, node):
'''
Evaluate a Parsimonious node.
'''
fn = funcmap.get(node.expr_name, lambda env, node, children: children)
if node.expr_name in defer_children_eval:
return fn(self.env, node)
children = [self._evalnode(child) for child in node]
# Mixins return lists that need to be unpacked.
for i, child in enumerate(children):
if isinstance(child, UnpackMe):
for packed_child in reversed(child):
children.insert(i+1, packed_child)
return fn(self.env, node, children)
def flatten(self):
'''
Flatten all nested rules and convert parent references
to standard selectors. Execute only after LightYear
code evaluation.
'''
for i, element in enumerate(self.ltree):
if isinstance(element, RuleBlock):
for j, child_element in reversed(list(enumerate(element.block))):
# Move nested RuleBlock objects to ltree and modify selectors.
if isinstance(child_element, RuleBlock):
if len(child_element.selectors) > 1 and len(element.selectors) > 1:
raise UnsupportedCommaNesting()
elif len(child_element.selectors) > 1:
child_element.selectors = [
element.selectors[0] + ' ' + child_sel
for child_sel in child_element.selectors]
else:
child_element.selectors = [
parent_sel + ' ' + child_element.selectors[0]
for parent_sel in element.selectors]
self.ltree.insert(i+1, child_element)
element.block[j] = IgnoreMe()
# Find parent selectors and convert to standard RuleBlocks.
elif isinstance(child_element, ParentReference):
ps_rule_block = child_element.rule_block
if not ps_rule_block.tag:
ps_rule_block.tag = element.tag
if len(ps_rule_block.selectors) > 1:
new_selectors = (
child_element.selectors[:-1] +
[child_element.selectors[-1] + ps_rule_block.selectors[0]] +
ps_rule_block.selectors[1:]
)
else:
new_selectors = (
element.selectors[:-1] +
[element.selectors[-1] + ps_rule_block.selectors[0]]
)
new_block = RuleBlock(
tag=ps_rule_block.tag,
selectors=new_selectors,
block=ps_rule_block.block,
index=ps_rule_block.index)
self.ltree.insert(i+1, new_block)
element.block[j] = IgnoreMe()
def reduce(self):
'''
Consolidate rules with identical selectors into single rules.
'''
# Reduce blocks.
ltree_reduced = OrderedDict()
non_block_count = 0
for element in self.ltree:
if hasattr(element, 'selectors'):
hash_ = repr(element.selectors)
elif hasattr(element, 'text'):
hash_ = element.text
else:
hash_ = non_block_count
non_block_count += 1
if hasattr(element, 'block'):
if hash_ in ltree_reduced:
ltree_reduced[hash_].block += element.block
else:
ltree_reduced[hash_] = element
else:
ltree_reduced[hash_] = element
ltree_reduced = [ltree_reduced[k] for k in ltree_reduced]
# Reduce properties.
for element in ltree_reduced:
non_property_count = 0
if hasattr(element, 'block'):
block_reduced = OrderedDict()
for child in element.block:
if hasattr(child, 'prop'):
block_reduced[child.prop] = child
else:
block_reduced[non_property_count] = child
non_property_count += 1
element.block = [block_reduced[k] for k in block_reduced]
self.ltree = ltree_reduced
def css(self):
'''
Output minified CSS. Should not be run until LightYear code is
evaluated and the resulting structure flattened.
'''
root_blocks = []
for e in self.ltree:
if isinstance(e, RootBlock):
root_blocks.append(e)
if not root_blocks:
root_blocks.append(RootBlock(tag_name=None, prefix=''))
output = ''
for root_block in root_blocks:
output += root_block.prefix
if root_block.prefix:
output += '{'
output += ''.join(e.css(tag=root_block.tag_name, debug=self.debug)
if hasattr(e, 'css')
else ''
for e in self.ltree)
if root_block.prefix:
output += '}'
if self.vendorize == 'prefixr':
return vendorize_css(output)
return output
def pretty_css(self):
'''
Output prettified CSS.
'''
if self.vendorize == 'prefixr':
raise LyError('Unable to prettify prefixr.com CSS')
def inside(index, chars):
j = index
try:
while True:
j += 1
if chars[j] == '}' or chars[j] == ';':
return True
elif chars[j] == '{':
return False
except IndexError:
return False
css_chars = list(self.css())
# Insert spaces and newlines.
skip = False
for i, c in enumerate(css_chars):
this_two = ''.join(css_chars[i:i+2]) if len(css_chars) > i+1 else None
next_two = ''.join(css_chars[i+1:i+3]) if len(css_chars) > i+2 else None
third = css_chars[i+2] if len(css_chars) > i+2 else None
if c == ';' and not next_two == '/*':
css_chars.insert(i+1, '\n')
elif this_two == '/*':
if skip:
skip = False
continue
css_chars.insert(i, ' ')
skip = True
elif c == ':' and inside(i, css_chars):
css_chars.insert(i+1, ' ')
elif c == '{':
if skip:
skip = False
continue
css_chars.insert(i+1, '\n')
css_chars.insert(i, ' ')
skip = True
elif c == '}':
css_chars.insert(i+1, '\n')
elif this_two == '*/' and not third == '{':
css_chars.insert(i+2, '\n')
elif c == ',':
if not css_chars[i+1] == ' ':
css_chars[i] = ', '
if css_chars[i-1] == ' ':
css_chars[i-1] = ''
# Insert Indentation
dent = 0
tab = ' '
for i, c in enumerate(css_chars):
next = css_chars[i+1] if len(css_chars) > i+1 else None
if next == '}':
dent -= 1
if c == '{':
dent += 1
elif c == '\n':
css_chars.insert(i+1, tab*dent)
if next == '}':
dent += 1
return ''.join(css_chars)
# Import LightYear grammar after LY class definition.
from . import lang
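# Illustrative usage sketch (assumption, not part of the original module): a
# typical round trip through the parser. The LightYear source passed in below
# is hypothetical; eval() tokenizes and parses it, and css()/pretty_css() emit
# the generated stylesheet.
#
#   ly = LY()
#   ly.eval(source_text)
#   print(ly.css())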
### PRE-PEG TOKENIZATION ###
def tokenize_whitespace(lines):
"""
For each line, identify the current level of indentation and compare it
against the indentation of the previous line. Insert BLK_OPEN or BLK_CLOSE
as appropriate.
"""
firstline = True
prevdent = 0
for lnum, line in enumerate(lines):
line = line.expandtabs(INDENT_SIZE)
# Don't allow empty lines to affect tracking of indentation.
stripped = line.strip()
if stripped == '' or stripped[:2] == '//':
yield line
continue
# Check for indentation on the first line.
if firstline:
if line[0] == " ":
raise IndentError(lnum)
firstline = False
leading_spaces = re.match('[ ]*', line).group()
curdent = len(leading_spaces) // INDENT_SIZE
if curdent == prevdent:
yield line
elif curdent == prevdent + 1:
yield BLK_OPEN + line
elif curdent < prevdent:
yield BLK_CLOSE * (prevdent - curdent) + line
else:
raise IndentError(lnum)
prevdent = curdent
# Handle indented last line.
yield BLK_CLOSE * prevdent
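# Illustrative sketch (assumption, INDENT_SIZE == 4): a line indented one level
# deeper than its predecessor is prefixed with BLK_OPEN, and any still-open
# blocks are closed with BLK_CLOSE at the end of input.
#
#   list(tokenize_whitespace(['a', '    b']))
#   # -> ['a', BLK_OPEN + '    b', BLK_CLOSE]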
def tokenize_comments(lines):
'''
Identify and tokenize comments.
'''
for line in lines:
for possible in (x.start(0) for x in re.finditer('//', line)):
if not _isquoted(line, possible):
line = line[:possible] + COMMENT_OPEN + line[possible:] + COMMENT_CLOSE
break
yield line
def _isquoted(line, pos):
'''
Return boolean value indicating whether the character at position
pos resides within a quote.
'''
DQUO = False
SQUO = False
for i in range(0, pos):
if not DQUO and not SQUO:
if line[i] == '"':
DQUO = True
elif line[i] == "'":
SQUO = True
elif DQUO:
if line[i] == '"':
DQUO = False
elif SQUO:
if line[i] == "'":
SQUO = False
return (DQUO or SQUO)
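# Illustrative sketch (assumption): '//' inside a quoted string is left alone,
# while a real comment is wrapped in COMMENT_OPEN/COMMENT_CLOSE tokens.
#
#   _isquoted('color: "//not-a-comment"', 8)
#   # -> True
#   list(tokenize_comments(['width: 10px  // note']))
#   # -> ['width: 10px  ' + COMMENT_OPEN + '// note' + COMMENT_CLOSE]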
### DEBUG ###
class DebugGenerator():
def __init__(self, ly_code):
self.ly_code = ly_code
def line_number(self, index):
return self.ly_code[:index].count('\n') + 1
def line_number_comment(self, index):
if index == 'generated':
return '/*GENERATED*/'
return '/*line{}*/'.format(self.line_number(index))
def __nonzero__(self):
return True
|
|
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from ironic_lib import metrics_utils
from oslo_utils import uuidutils
import pecan
from pecan import rest
from six.moves import http_client
import wsme
from wsme import types as wtypes
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import notification_utils as notify
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import expose
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import policy
from ironic.common import utils as common_utils
from ironic import objects
METRICS = metrics_utils.get_metrics_logger(__name__)
_DEFAULT_RETURN_FIELDS = ('uuid', 'address')
def hide_fields_in_newer_versions(obj):
# if requested version is < 1.18, hide internal_info field
if not api_utils.allow_port_internal_info():
obj.internal_info = wsme.Unset
# if requested version is < 1.19, hide local_link_connection and
# pxe_enabled fields
if not api_utils.allow_port_advanced_net_fields():
obj.pxe_enabled = wsme.Unset
obj.local_link_connection = wsme.Unset
# if requested version is < 1.24, hide portgroup_uuid field
if not api_utils.allow_portgroups_subcontrollers():
obj.portgroup_uuid = wsme.Unset
class Port(base.APIBase):
"""API representation of a port.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of a port.
"""
_node_uuid = None
_portgroup_uuid = None
def _get_node_uuid(self):
return self._node_uuid
def _set_node_uuid(self, value):
if value and self._node_uuid != value:
try:
# FIXME(comstud): One should only allow UUID here, but
# there seems to be a bug in that tests are passing an
# ID. See bug #1301046 for more details.
node = objects.Node.get(pecan.request.context, value)
self._node_uuid = node.uuid
# NOTE(lucasagomes): Create the node_id attribute on-the-fly
# to satisfy the api -> rpc object
# conversion.
self.node_id = node.id
except exception.NodeNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
# response for a POST request to create a Port
e.code = http_client.BAD_REQUEST # BadRequest
raise
elif value == wtypes.Unset:
self._node_uuid = wtypes.Unset
def _get_portgroup_uuid(self):
return self._portgroup_uuid
def _set_portgroup_uuid(self, value):
if value and self._portgroup_uuid != value:
if not api_utils.allow_portgroups_subcontrollers():
self._portgroup_uuid = wtypes.Unset
return
try:
portgroup = objects.Portgroup.get(pecan.request.context, value)
if portgroup.node_id != self.node_id:
raise exception.BadRequest(_('Port can not be added to a '
'portgroup belonging to a '
'different node.'))
self._portgroup_uuid = portgroup.uuid
# NOTE(lucasagomes): Create the portgroup_id attribute
# on-the-fly to satisfy the api ->
# rpc object conversion.
self.portgroup_id = portgroup.id
except exception.PortgroupNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
# response for a POST request to create a Port
e.code = http_client.BAD_REQUEST # BadRequest
raise e
elif value == wtypes.Unset:
self._portgroup_uuid = wtypes.Unset
elif value is None and api_utils.allow_portgroups_subcontrollers():
# Output the portgroup_uuid field if the API version allows it
self._portgroup_uuid = None
uuid = types.uuid
"""Unique UUID for this port"""
address = wsme.wsattr(types.macaddress, mandatory=True)
"""MAC Address for this port"""
extra = {wtypes.text: types.jsontype}
"""This port's meta data"""
internal_info = wsme.wsattr({wtypes.text: types.jsontype}, readonly=True)
"""This port's internal information maintained by ironic"""
node_uuid = wsme.wsproperty(types.uuid, _get_node_uuid, _set_node_uuid,
mandatory=True)
"""The UUID of the node this port belongs to"""
portgroup_uuid = wsme.wsproperty(types.uuid, _get_portgroup_uuid,
_set_portgroup_uuid, mandatory=False)
"""The UUID of the portgroup this port belongs to"""
pxe_enabled = types.boolean
"""Indicates whether pxe is enabled or disabled on the node."""
local_link_connection = types.locallinkconnectiontype
"""The port binding profile for the port"""
links = wsme.wsattr([link.Link], readonly=True)
"""A list containing a self link and associated port links"""
def __init__(self, **kwargs):
self.fields = []
fields = list(objects.Port.fields)
# NOTE(lucasagomes): node_uuid is not part of objects.Port.fields
# because it's an API-only attribute
fields.append('node_uuid')
# NOTE: portgroup_uuid is not part of objects.Port.fields
# because it's an API-only attribute
fields.append('portgroup_uuid')
for field in fields:
# Add fields we expose.
if hasattr(self, field):
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
# NOTE(lucasagomes): node_id is an attribute created on-the-fly
# by _set_node_uuid(), it needs to be present in the fields so
# that as_dict() will contain node_id field when converting it
# before saving it in the database.
self.fields.append('node_id')
setattr(self, 'node_uuid', kwargs.get('node_id', wtypes.Unset))
# NOTE: portgroup_id is an attribute created on-the-fly
# by _set_portgroup_uuid(), it needs to be present in the fields so
# that as_dict() will contain portgroup_id field when converting it
# before saving it in the database.
self.fields.append('portgroup_id')
setattr(self, 'portgroup_uuid', kwargs.get('portgroup_id',
wtypes.Unset))
@staticmethod
def _convert_with_links(port, url, fields=None):
# NOTE(lucasagomes): Since we are able to return a specified set of
# fields the "uuid" can be unset, so we need to save it in another
# variable to use when building the links
port_uuid = port.uuid
if fields is not None:
port.unset_fields_except(fields)
# never expose the node_id attribute
port.node_id = wtypes.Unset
# never expose the portgroup_id attribute
port.portgroup_id = wtypes.Unset
port.links = [link.Link.make_link('self', url,
'ports', port_uuid),
link.Link.make_link('bookmark', url,
'ports', port_uuid,
bookmark=True)
]
return port
@classmethod
def convert_with_links(cls, rpc_port, fields=None):
port = Port(**rpc_port.as_dict())
if fields is not None:
api_utils.check_for_invalid_fields(fields, port.as_dict())
hide_fields_in_newer_versions(port)
return cls._convert_with_links(port, pecan.request.public_url,
fields=fields)
@classmethod
def sample(cls, expand=True):
sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
address='fe:54:00:77:07:d9',
extra={'foo': 'bar'},
internal_info={},
created_at=datetime.datetime.utcnow(),
updated_at=datetime.datetime.utcnow(),
pxe_enabled=True,
local_link_connection={
'switch_info': 'host', 'port_id': 'Gig0/1',
'switch_id': 'aa:bb:cc:dd:ee:ff'})
# NOTE(lucasagomes): the node_uuid getter() method looks at the
# _node_uuid variable
sample._node_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae'
sample._portgroup_uuid = '037d9a52-af89-4560-b5a3-a33283295ba2'
fields = None if expand else _DEFAULT_RETURN_FIELDS
return cls._convert_with_links(sample, 'http://localhost:6385',
fields=fields)
class PortPatchType(types.JsonPatchType):
_api_base = Port
@staticmethod
def internal_attrs():
defaults = types.JsonPatchType.internal_attrs()
return defaults + ['/internal_info']
class PortCollection(collection.Collection):
"""API representation of a collection of ports."""
ports = [Port]
"""A list containing ports objects"""
def __init__(self, **kwargs):
self._type = 'ports'
@staticmethod
def convert_with_links(rpc_ports, limit, url=None, fields=None, **kwargs):
collection = PortCollection()
collection.ports = [Port.convert_with_links(p, fields=fields)
for p in rpc_ports]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
@classmethod
def sample(cls):
sample = cls()
sample.ports = [Port.sample(expand=False)]
return sample
class PortsController(rest.RestController):
"""REST controller for Ports."""
_custom_actions = {
'detail': ['GET'],
}
invalid_sort_key_list = ['extra', 'internal_info', 'local_link_connection']
advanced_net_fields = ['pxe_enabled', 'local_link_connection']
def __init__(self, node_ident=None, portgroup_ident=None):
super(PortsController, self).__init__()
self.parent_node_ident = node_ident
self.parent_portgroup_ident = portgroup_ident
def _get_ports_collection(self, node_ident, address, portgroup_ident,
marker, limit, sort_key, sort_dir,
resource_url=None, fields=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.Port.get_by_uuid(pecan.request.context,
marker)
if sort_key in self.invalid_sort_key_list:
raise exception.InvalidParameterValue(
_("The sort_key value %(key)s is an invalid field for "
"sorting") % {'key': sort_key})
node_ident = self.parent_node_ident or node_ident
portgroup_ident = self.parent_portgroup_ident or portgroup_ident
if node_ident and portgroup_ident:
raise exception.OperationNotPermitted()
if portgroup_ident:
# FIXME: Since all we need is the portgroup ID, we can
# make this more efficient by only querying
# for that column. This will get cleaned up
# as we move to the object interface.
portgroup = api_utils.get_rpc_portgroup(portgroup_ident)
ports = objects.Port.list_by_portgroup_id(pecan.request.context,
portgroup.id, limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
elif node_ident:
# FIXME(comstud): Since all we need is the node ID, we can
# make this more efficient by only querying
# for that column. This will get cleaned up
# as we move to the object interface.
node = api_utils.get_rpc_node(node_ident)
ports = objects.Port.list_by_node_id(pecan.request.context,
node.id, limit, marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
elif address:
ports = self._get_ports_by_address(address)
else:
ports = objects.Port.list(pecan.request.context, limit,
marker_obj, sort_key=sort_key,
sort_dir=sort_dir)
return PortCollection.convert_with_links(ports, limit,
url=resource_url,
fields=fields,
sort_key=sort_key,
sort_dir=sort_dir)
def _get_ports_by_address(self, address):
"""Retrieve a port by its address.
:param address: MAC address of a port, to get the port which has
this MAC address.
:returns: a list with the port, or an empty list if no port is found.
"""
try:
port = objects.Port.get_by_address(pecan.request.context, address)
return [port]
except exception.PortNotFound:
return []
@METRICS.timer('PortsController.get_all')
@expose.expose(PortCollection, types.uuid_or_name, types.uuid,
types.macaddress, types.uuid, int, wtypes.text,
wtypes.text, types.listtype, types.uuid_or_name)
def get_all(self, node=None, node_uuid=None, address=None, marker=None,
limit=None, sort_key='id', sort_dir='asc', fields=None,
portgroup=None):
"""Retrieve a list of ports.
Note that the 'node_uuid' interface is deprecated in favour
of the 'node' interface
:param node: UUID or name of a node, to get only ports for that
node.
:param node_uuid: UUID of a node, to get only ports for that
node.
:param address: MAC address of a port, to get the port which has
this MAC address.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
This value cannot be larger than the value of max_limit
in the [api] section of the ironic configuration, or only
max_limit resources will be returned.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
:param portgroup: UUID or name of a portgroup, to get only ports
for that portgroup.
:raises: NotAcceptable, HTTPNotFound
"""
cdict = pecan.request.context.to_policy_values()
policy.authorize('baremetal:port:get', cdict, cdict)
api_utils.check_allow_specify_fields(fields)
if fields:
if (not api_utils.allow_port_advanced_net_fields() and
set(fields).intersection(self.advanced_net_fields)):
raise exception.NotAcceptable()
if ('portgroup_uuid' in fields and not
api_utils.allow_portgroups_subcontrollers()):
raise exception.NotAcceptable()
if portgroup and not api_utils.allow_portgroups_subcontrollers():
raise exception.NotAcceptable()
if fields is None:
fields = _DEFAULT_RETURN_FIELDS
if not node_uuid and node:
# We're invoking this interface using positional notation, or
# explicitly using 'node'. Try and determine which one.
# Make sure only one interface, node or node_uuid is used
if (not api_utils.allow_node_logical_names() and
not uuidutils.is_uuid_like(node)):
raise exception.NotAcceptable()
return self._get_ports_collection(node_uuid or node, address,
portgroup, marker, limit, sort_key,
sort_dir, fields=fields)
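# Illustrative request sketch (assumption; the node name and field list are
# made up): the query string
#   GET /v1/ports?node=node-1&fields=uuid,address&limit=2
# maps onto get_all(node='node-1', fields=['uuid', 'address'], limit=2).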
@METRICS.timer('PortsController.detail')
@expose.expose(PortCollection, types.uuid_or_name, types.uuid,
types.macaddress, types.uuid, int, wtypes.text,
wtypes.text, types.uuid_or_name)
def detail(self, node=None, node_uuid=None, address=None, marker=None,
limit=None, sort_key='id', sort_dir='asc', portgroup=None):
"""Retrieve a list of ports with detail.
Note that the 'node_uuid' interface is deprecated in favour
of the 'node' interface
:param node: UUID or name of a node, to get only ports for that
node.
:param node_uuid: UUID of a node, to get only ports for that
node.
:param address: MAC address of a port, to get the port which has
this MAC address.
:param portgroup: UUID or name of a portgroup, to get only ports
for that portgroup.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
This value cannot be larger than the value of max_limit
in the [api] section of the ironic configuration, or only
max_limit resources will be returned.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:raises: NotAcceptable, HTTPNotFound
"""
cdict = pecan.request.context.to_policy_values()
policy.authorize('baremetal:port:get', cdict, cdict)
if portgroup and not api_utils.allow_portgroups_subcontrollers():
raise exception.NotAcceptable()
if not node_uuid and node:
# We're invoking this interface using positional notation, or
# explicitly using 'node'. Try and determine which one.
# Make sure only one interface, node or node_uuid is used
if (not api_utils.allow_node_logical_names() and
not uuidutils.is_uuid_like(node)):
raise exception.NotAcceptable()
# NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "ports":
raise exception.HTTPNotFound()
resource_url = '/'.join(['ports', 'detail'])
return self._get_ports_collection(node_uuid or node, address,
portgroup, marker, limit, sort_key,
sort_dir, resource_url)
@METRICS.timer('PortsController.get_one')
@expose.expose(Port, types.uuid, types.listtype)
def get_one(self, port_uuid, fields=None):
"""Retrieve information about the given port.
:param port_uuid: UUID of a port.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
:raises: NotAcceptable, HTTPNotFound
"""
cdict = pecan.request.context.to_policy_values()
policy.authorize('baremetal:port:get', cdict, cdict)
if self.parent_node_ident or self.parent_portgroup_ident:
raise exception.OperationNotPermitted()
api_utils.check_allow_specify_fields(fields)
rpc_port = objects.Port.get_by_uuid(pecan.request.context, port_uuid)
return Port.convert_with_links(rpc_port, fields=fields)
@METRICS.timer('PortsController.post')
@expose.expose(Port, body=Port, status_code=http_client.CREATED)
def post(self, port):
"""Create a new port.
:param port: a port within the request body.
:raises: NotAcceptable, HTTPNotFound, Conflict
"""
context = pecan.request.context
cdict = context.to_policy_values()
policy.authorize('baremetal:port:create', cdict, cdict)
if self.parent_node_ident or self.parent_portgroup_ident:
raise exception.OperationNotPermitted()
pdict = port.as_dict()
if (not api_utils.allow_port_advanced_net_fields() and
set(pdict).intersection(self.advanced_net_fields)):
raise exception.NotAcceptable()
if (not api_utils.allow_portgroups_subcontrollers() and
'portgroup_uuid' in pdict):
raise exception.NotAcceptable()
extra = pdict.get('extra')
vif = extra.get('vif_port_id') if extra else None
if vif:
common_utils.warn_about_deprecated_extra_vif_port_id()
if (pdict.get('portgroup_uuid') and
(pdict.get('pxe_enabled') or vif)):
rpc_pg = objects.Portgroup.get_by_uuid(context,
pdict['portgroup_uuid'])
if not rpc_pg.standalone_ports_supported:
msg = _("Port group %s doesn't support standalone ports. "
"This port cannot be created as a member of that "
"port group because either 'extra/vif_port_id' "
"was specified or 'pxe_enabled' was set to True.")
raise exception.Conflict(
msg % pdict['portgroup_uuid'])
# NOTE(yuriyz): UUID is mandatory for notifications payload
if not pdict.get('uuid'):
pdict['uuid'] = uuidutils.generate_uuid()
new_port = objects.Port(context, **pdict)
notify.emit_start_notification(context, new_port, 'create',
node_uuid=port.node_uuid)
with notify.handle_error_notification(context, new_port, 'create',
node_uuid=port.node_uuid):
new_port.create()
notify.emit_end_notification(context, new_port, 'create',
node_uuid=port.node_uuid)
# Set the HTTP Location Header
pecan.response.location = link.build_url('ports', new_port.uuid)
return Port.convert_with_links(new_port)
@METRICS.timer('PortsController.patch')
@wsme.validate(types.uuid, [PortPatchType])
@expose.expose(Port, types.uuid, body=[PortPatchType])
def patch(self, port_uuid, patch):
"""Update an existing port.
:param port_uuid: UUID of a port.
:param patch: a json PATCH document to apply to this port.
:raises: NotAcceptable, HTTPNotFound
"""
context = pecan.request.context
cdict = context.to_policy_values()
policy.authorize('baremetal:port:update', cdict, cdict)
if self.parent_node_ident or self.parent_portgroup_ident:
raise exception.OperationNotPermitted()
fields_to_check = set()
for field in self.advanced_net_fields + ['portgroup_uuid']:
field_path = '/%s' % field
if (api_utils.get_patch_values(patch, field_path) or
api_utils.is_path_removed(patch, field_path)):
fields_to_check.add(field)
if (fields_to_check.intersection(self.advanced_net_fields) and
not api_utils.allow_port_advanced_net_fields()):
raise exception.NotAcceptable()
if ('portgroup_uuid' in fields_to_check and
not api_utils.allow_portgroups_subcontrollers()):
raise exception.NotAcceptable()
rpc_port = objects.Port.get_by_uuid(context, port_uuid)
try:
port_dict = rpc_port.as_dict()
# NOTE(lucasagomes):
# 1) Remove node_id because it's an internal value and
# not present in the API object
# 2) Add node_uuid
port_dict['node_uuid'] = port_dict.pop('node_id', None)
# NOTE(vsaienko):
# 1) Remove portgroup_id because it's an internal value and
# not present in the API object
# 2) Add portgroup_uuid
port_dict['portgroup_uuid'] = port_dict.pop('portgroup_id', None)
port = Port(**api_utils.apply_jsonpatch(port_dict, patch))
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
if api_utils.is_path_removed(patch, '/portgroup_uuid'):
rpc_port.portgroup_id = None
# Update only the fields that have changed
for field in objects.Port.fields:
try:
patch_val = getattr(port, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if rpc_port[field] != patch_val:
rpc_port[field] = patch_val
rpc_node = objects.Node.get_by_id(context, rpc_port.node_id)
notify.emit_start_notification(context, rpc_port, 'update',
node_uuid=rpc_node.uuid)
with notify.handle_error_notification(context, rpc_port, 'update',
node_uuid=rpc_node.uuid):
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
new_port = pecan.request.rpcapi.update_port(context, rpc_port,
topic)
api_port = Port.convert_with_links(new_port)
notify.emit_end_notification(context, new_port, 'update',
node_uuid=api_port.node_uuid)
return api_port
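# Illustrative sketch (assumption; the MAC address is made up): a JSON Patch
# document accepted by the method above, changing a port's MAC address:
#   [{"op": "replace", "path": "/address", "value": "52:54:00:12:34:56"}]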
@METRICS.timer('PortsController.delete')
@expose.expose(None, types.uuid, status_code=http_client.NO_CONTENT)
def delete(self, port_uuid):
"""Delete a port.
:param port_uuid: UUID of a port.
:raises: OperationNotPermitted, HTTPNotFound
"""
context = pecan.request.context
cdict = context.to_policy_values()
policy.authorize('baremetal:port:delete', cdict, cdict)
if self.parent_node_ident or self.parent_portgroup_ident:
raise exception.OperationNotPermitted()
rpc_port = objects.Port.get_by_uuid(context, port_uuid)
rpc_node = objects.Node.get_by_id(context, rpc_port.node_id)
notify.emit_start_notification(context, rpc_port, 'delete',
node_uuid=rpc_node.uuid)
with notify.handle_error_notification(context, rpc_port, 'delete',
node_uuid=rpc_node.uuid):
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
pecan.request.rpcapi.destroy_port(context, rpc_port, topic)
notify.emit_end_notification(context, rpc_port, 'delete',
node_uuid=rpc_node.uuid)
|
|
# firebird/base.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird
:name: Firebird
Firebird Dialects
-----------------
Firebird offers two distinct dialects_ (not to be confused with a
SQLAlchemy ``Dialect``):
dialect 1
This is the old syntax and behaviour, inherited from Interbase pre-6.0.
dialect 3
This is the newer and supported syntax, introduced in Interbase 6.0.
The SQLAlchemy Firebird dialect detects these versions and
adjusts its representation of SQL accordingly. However,
support for dialect 1 is not well tested and probably has
incompatibilities.
Locking Behavior
----------------
Firebird locks tables aggressively. For this reason, a DROP TABLE may
hang until other transactions are released. SQLAlchemy does its best
to release transactions as quickly as possible. The most common cause
of hanging transactions is a non-fully consumed result set, i.e.::
result = engine.execute("select * from table")
row = result.fetchone()
return
Where above, the ``ResultProxy`` has not been fully consumed. The
connection will be returned to the pool and the transactional state
rolled back once the Python garbage collector reclaims the objects
which hold onto the connection, which often occurs asynchronously.
The above use case can be alleviated by calling ``first()`` on the
``ResultProxy`` which will fetch the first row and immediately close
all remaining cursor/connection resources.
RETURNING support
-----------------
Firebird 2.0 supports returning a result set from inserts, and 2.1
extends that to deletes and updates. This is generically exposed by
the SQLAlchemy ``returning()`` method, such as::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\\
values(name='foo')
print(result.fetchall())
# UPDATE..RETURNING
raises = empl.update().returning(empl.c.id, empl.c.salary).\\
where(empl.c.sales>100).\\
values(dict(salary=empl.c.salary * 1.1))
print(raises.fetchall())
.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html
"""
import datetime
from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, types as sqltypes, sql, util
from sqlalchemy.sql import expression
from sqlalchemy.engine import base, default, reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.types import (BIGINT, BLOB, DATE, FLOAT, INTEGER, NUMERIC,
SMALLINT, TEXT, TIME, TIMESTAMP, Integer)
RESERVED_WORDS = set([
"active", "add", "admin", "after", "all", "alter", "and", "any", "as",
"asc", "ascending", "at", "auto", "avg", "before", "begin", "between",
"bigint", "bit_length", "blob", "both", "by", "case", "cast", "char",
"character", "character_length", "char_length", "check", "close",
"collate", "column", "commit", "committed", "computed", "conditional",
"connect", "constraint", "containing", "count", "create", "cross",
"cstring", "current", "current_connection", "current_date",
"current_role", "current_time", "current_timestamp",
"current_transaction", "current_user", "cursor", "database", "date",
"day", "dec", "decimal", "declare", "default", "delete", "desc",
"descending", "disconnect", "distinct", "do", "domain", "double",
"drop", "else", "end", "entry_point", "escape", "exception",
"execute", "exists", "exit", "external", "extract", "fetch", "file",
"filter", "float", "for", "foreign", "from", "full", "function",
"gdscode", "generator", "gen_id", "global", "grant", "group",
"having", "hour", "if", "in", "inactive", "index", "inner",
"input_type", "insensitive", "insert", "int", "integer", "into", "is",
"isolation", "join", "key", "leading", "left", "length", "level",
"like", "long", "lower", "manual", "max", "maximum_segment", "merge",
"min", "minute", "module_name", "month", "names", "national",
"natural", "nchar", "no", "not", "null", "numeric", "octet_length",
"of", "on", "only", "open", "option", "or", "order", "outer",
"output_type", "overflow", "page", "pages", "page_size", "parameter",
"password", "plan", "position", "post_event", "precision", "primary",
"privileges", "procedure", "protected", "rdb$db_key", "read", "real",
"record_version", "recreate", "recursive", "references", "release",
"reserv", "reserving", "retain", "returning_values", "returns",
"revoke", "right", "rollback", "rows", "row_count", "savepoint",
"schema", "second", "segment", "select", "sensitive", "set", "shadow",
"shared", "singular", "size", "smallint", "snapshot", "some", "sort",
"sqlcode", "stability", "start", "starting", "starts", "statistics",
"sub_type", "sum", "suspend", "table", "then", "time", "timestamp",
"to", "trailing", "transaction", "trigger", "trim", "uncommitted",
"union", "unique", "update", "upper", "user", "using", "value",
"values", "varchar", "variable", "varying", "view", "wait", "when",
"where", "while", "with", "work", "write", "year",
])
class _StringType(sqltypes.String):
"""Base for Firebird string types."""
def __init__(self, charset=None, **kw):
self.charset = charset
super(_StringType, self).__init__(**kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""Firebird VARCHAR type"""
__visit_name__ = 'VARCHAR'
def __init__(self, length=None, **kwargs):
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""Firebird CHAR type"""
__visit_name__ = 'CHAR'
def __init__(self, length=None, **kwargs):
super(CHAR, self).__init__(length=length, **kwargs)
class _FBDateTime(sqltypes.DateTime):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
colspecs = {
sqltypes.DateTime: _FBDateTime
}
ischema_names = {
'SHORT': SMALLINT,
'LONG': INTEGER,
'QUAD': FLOAT,
'FLOAT': FLOAT,
'DATE': DATE,
'TIME': TIME,
'TEXT': TEXT,
'INT64': BIGINT,
'DOUBLE': FLOAT,
'TIMESTAMP': TIMESTAMP,
'VARYING': VARCHAR,
'CSTRING': CHAR,
'BLOB': BLOB,
}
# TODO: date conversion types (should be implemented as _FBDateTime,
# _FBDate, etc. as bind/result functionality is required)
class FBTypeCompiler(compiler.GenericTypeCompiler):
def visit_boolean(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_TIMESTAMP(type_, **kw)
def visit_TEXT(self, type_, **kw):
return "BLOB SUB_TYPE 1"
def visit_BLOB(self, type_, **kw):
return "BLOB SUB_TYPE 0"
def _extend_string(self, type_, basic):
charset = getattr(type_, 'charset', None)
if charset is None:
return basic
else:
return '%s CHARACTER SET %s' % (basic, charset)
def visit_CHAR(self, type_, **kw):
basic = super(FBTypeCompiler, self).visit_CHAR(type_, **kw)
return self._extend_string(type_, basic)
def visit_VARCHAR(self, type_, **kw):
if not type_.length:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" %
self.dialect.name)
basic = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw)
return self._extend_string(type_, basic)
class FBCompiler(sql.compiler.SQLCompiler):
"""Firebird specific idiosyncrasies"""
ansi_bind_rules = True
# def visit_contains_op_binary(self, binary, operator, **kw):
# can't use CONTAINING because it's case-insensitive.
# def visit_notcontains_op_binary(self, binary, operator, **kw):
# can't use NOT CONTAINING because it's case-insensitive.
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_startswith_op_binary(self, binary, operator, **kw):
return '%s STARTING WITH %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw))
def visit_notstartswith_op_binary(self, binary, operator, **kw):
return '%s NOT STARTING WITH %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw))
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw))
def visit_alias(self, alias, asfrom=False, **kwargs):
if self.dialect._version_two:
return super(FBCompiler, self).\
visit_alias(alias, asfrom=asfrom, **kwargs)
else:
# Override to not use the AS keyword which FB 1.5 does not like
if asfrom:
alias_name = isinstance(alias.name,
expression._truncated_label) and \
self._truncated_identifier("alias",
alias.name) or alias.name
return self.process(
alias.original, asfrom=asfrom, **kwargs) + \
" " + \
self.preparer.format_alias(alias, alias_name)
else:
return self.process(alias.original, **kwargs)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0])
start = self.process(func.clauses.clauses[1])
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2])
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
def visit_length_func(self, function, **kw):
if self.dialect._version_two:
return "char_length" + self.function_argspec(function)
else:
return "strlen" + self.function_argspec(function)
visit_char_length_func = visit_length_func
def function_argspec(self, func, **kw):
# TODO: this probably will need to be
# narrowed to a fixed list, some no-arg functions
# may require parens - see similar example in the oracle
# dialect
if func.clauses is not None and len(func.clauses):
return self.process(func.clause_expr, **kw)
else:
return ""
def default_from(self):
return " FROM rdb$database"
def visit_sequence(self, seq):
return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list Firebird puts the limit and offset right
after the ``SELECT``...
"""
result = ""
if select._limit_clause is not None:
result += "FIRST %s " % self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
result += "SKIP %s " % self.process(select._offset_clause, **kw)
if select._distinct:
result += "DISTINCT "
return result
def limit_clause(self, select, **kw):
"""Already taken care of in the `get_select_precolumns` method."""
return ""
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return 'RETURNING ' + ', '.join(columns)
class FBDDLCompiler(sql.compiler.DDLCompiler):
"""Firebird syntactic idiosyncrasies"""
def visit_create_sequence(self, create):
"""Generate a ``CREATE GENERATOR`` statement for the sequence."""
# no syntax for these
# http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
if create.element.start is not None:
raise NotImplementedError(
"Firebird SEQUENCE doesn't support START WITH")
if create.element.increment is not None:
raise NotImplementedError(
"Firebird SEQUENCE doesn't support INCREMENT BY")
if self.dialect._version_two:
return "CREATE SEQUENCE %s" % \
self.preparer.format_sequence(create.element)
else:
return "CREATE GENERATOR %s" % \
self.preparer.format_sequence(create.element)
def visit_drop_sequence(self, drop):
"""Generate a ``DROP GENERATOR`` statement for the sequence."""
if self.dialect._version_two:
return "DROP SEQUENCE %s" % \
self.preparer.format_sequence(drop.element)
else:
return "DROP GENERATOR %s" % \
self.preparer.format_sequence(drop.element)
class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
"""Install Firebird specific reserved words."""
reserved_words = RESERVED_WORDS
illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(
['_'])
def __init__(self, dialect):
super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
class FBExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
"""Get the next value from the sequence using ``gen_id()``."""
return self._execute_scalar(
"SELECT gen_id(%s, 1) FROM rdb$database" %
self.dialect.identifier_preparer.format_sequence(seq),
type_
)
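# Illustrative sketch (assumption; the sequence name is made up): for a
# Sequence named 'gen_customer_id', fire_sequence() issues roughly
#   SELECT gen_id(gen_customer_id, 1) FROM rdb$database
# and returns the scalar result as the next value.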
class FBDialect(default.DefaultDialect):
"""Firebird dialect"""
name = 'firebird'
max_identifier_length = 31
supports_sequences = True
sequences_optional = False
supports_default_values = True
postfetch_lastrowid = False
supports_native_boolean = False
requires_name_normalize = True
supports_empty_insert = False
statement_compiler = FBCompiler
ddl_compiler = FBDDLCompiler
preparer = FBIdentifierPreparer
type_compiler = FBTypeCompiler
execution_ctx_cls = FBExecutionContext
colspecs = colspecs
ischema_names = ischema_names
construct_arguments = []
# defaults to dialect ver. 3,
# will be autodetected off upon
# first connect
_version_two = True
def initialize(self, connection):
super(FBDialect, self).initialize(connection)
self._version_two = ('firebird' in self.server_version_info and
self.server_version_info >= (2, )
) or \
('interbase' in self.server_version_info and
self.server_version_info >= (6, )
)
if not self._version_two:
# TODO: whatever other pre < 2.0 stuff goes here
self.ischema_names = ischema_names.copy()
self.ischema_names['TIMESTAMP'] = sqltypes.DATE
self.colspecs = {
sqltypes.DateTime: sqltypes.DATE
}
self.implicit_returning = self._version_two and \
self.__dict__.get('implicit_returning', True)
def normalize_name(self, name):
# Remove trailing spaces: FB uses a CHAR() type,
# that is padded with spaces
name = name and name.rstrip()
if name is None:
return None
elif name.upper() == name and \
not self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
elif name.lower() == name:
return quoted_name(name, quote=True)
else:
return name
def denormalize_name(self, name):
if name is None:
return None
elif name.lower() == name and \
not self.identifier_preparer._requires_quotes(name.lower()):
return name.upper()
else:
return name
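# Illustrative sketch (assumption): how identifiers round-trip through the
# two methods above for an unreserved name.
#   normalize_name('CUSTOMERS')   # -> 'customers'
#   normalize_name('Customers')   # -> 'Customers' (mixed case kept as-is)
#   normalize_name('customers')   # -> quoted_name('customers', quote=True)
#   denormalize_name('customers') # -> 'CUSTOMERS'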
def has_table(self, connection, table_name, schema=None):
"""Return ``True`` if the given table exists, ignoring
the `schema`."""
tblqry = """
SELECT 1 AS has_table FROM rdb$database
WHERE EXISTS (SELECT rdb$relation_name
FROM rdb$relations
WHERE rdb$relation_name=?)
"""
c = connection.execute(tblqry, [self.denormalize_name(table_name)])
return c.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
"""Return ``True`` if the given sequence (generator) exists."""
genqry = """
SELECT 1 AS has_sequence FROM rdb$database
WHERE EXISTS (SELECT rdb$generator_name
FROM rdb$generators
WHERE rdb$generator_name=?)
"""
c = connection.execute(genqry, [self.denormalize_name(sequence_name)])
return c.first() is not None
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
# there are two queries commonly mentioned for this.
# this one, using view_blr, is at the Firebird FAQ among other places:
# http://www.firebirdfaq.org/faq174/
s = """
select rdb$relation_name
from rdb$relations
where rdb$view_blr is null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
# the other query is this one. It's not clear if there's really
# any difference between these two. This link:
# http://www.alberton.info/firebird_sql_meta_info.html#.Ur3vXfZGni8
# states them as interchangeable. Some discussion at [ticket:2898]
# SELECT DISTINCT rdb$relation_name
# FROM rdb$relation_fields
# WHERE rdb$system_flag=0 AND rdb$view_context IS NULL
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
# see http://www.firebirdfaq.org/faq174/
s = """
select rdb$relation_name
from rdb$relations
where rdb$view_blr is not null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
qry = """
SELECT rdb$view_source AS view_source
FROM rdb$relations
WHERE rdb$relation_name=?
"""
rp = connection.execute(qry, [self.denormalize_name(view_name)])
row = rp.first()
if row:
return row['view_source']
else:
return None
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
# Query to extract the PK/FK constrained fields of the given table
keyqry = """
SELECT se.rdb$field_name AS fname
FROM rdb$relation_constraints rc
JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
"""
tablename = self.denormalize_name(table_name)
# get primary key fields
c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()]
return {'constrained_columns': pkfields, 'name': None}
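# Illustrative sketch (assumption): for a table whose primary key is a single
# column named ID, this returns {'constrained_columns': ['id'], 'name': None}.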
@reflection.cache
def get_column_sequence(self, connection,
table_name, column_name,
schema=None, **kw):
tablename = self.denormalize_name(table_name)
colname = self.denormalize_name(column_name)
# Heuristic-query to determine the generator associated to a PK field
genqry = """
SELECT trigdep.rdb$depended_on_name AS fgenerator
FROM rdb$dependencies tabdep
JOIN rdb$dependencies trigdep
ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
AND trigdep.rdb$depended_on_type=14
AND trigdep.rdb$dependent_type=2
JOIN rdb$triggers trig ON
trig.rdb$trigger_name=tabdep.rdb$dependent_name
WHERE tabdep.rdb$depended_on_name=?
AND tabdep.rdb$depended_on_type=0
AND trig.rdb$trigger_type=1
AND tabdep.rdb$field_name=?
AND (SELECT count(*)
FROM rdb$dependencies trigdep2
WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
"""
genr = connection.execute(genqry, [tablename, colname]).first()
if genr is not None:
return dict(name=self.normalize_name(genr['fgenerator']))
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
# Query to extract the details of all the fields of the given table
tblqry = """
SELECT r.rdb$field_name AS fname,
r.rdb$null_flag AS null_flag,
t.rdb$type_name AS ftype,
f.rdb$field_sub_type AS stype,
f.rdb$field_length/
COALESCE(cs.rdb$bytes_per_character,1) AS flen,
f.rdb$field_precision AS fprec,
f.rdb$field_scale AS fscale,
COALESCE(r.rdb$default_source,
f.rdb$default_source) AS fdefault
FROM rdb$relation_fields r
JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
JOIN rdb$types t
ON t.rdb$type=f.rdb$field_type AND
t.rdb$field_name='RDB$FIELD_TYPE'
LEFT JOIN rdb$character_sets cs ON
f.rdb$character_set_id=cs.rdb$character_set_id
WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
ORDER BY r.rdb$field_position
"""
# get the PK, used to determine the eventual associated sequence
pk_constraint = self.get_pk_constraint(connection, table_name)
pkey_cols = pk_constraint['constrained_columns']
tablename = self.denormalize_name(table_name)
# get all of the fields for this table
c = connection.execute(tblqry, [tablename])
cols = []
while True:
row = c.fetchone()
if row is None:
break
name = self.normalize_name(row['fname'])
orig_colname = row['fname']
# get the data type
colspec = row['ftype'].rstrip()
coltype = self.ischema_names.get(colspec)
if coltype is None:
util.warn("Did not recognize type '%s' of column '%s'" %
(colspec, name))
coltype = sqltypes.NULLTYPE
elif issubclass(coltype, Integer) and row['fprec'] != 0:
coltype = NUMERIC(
precision=row['fprec'],
scale=row['fscale'] * -1)
elif colspec in ('VARYING', 'CSTRING'):
coltype = coltype(row['flen'])
elif colspec == 'TEXT':
coltype = TEXT(row['flen'])
elif colspec == 'BLOB':
if row['stype'] == 1:
coltype = TEXT()
else:
coltype = BLOB()
else:
coltype = coltype()
# does it have a default value?
defvalue = None
if row['fdefault'] is not None:
# the value comes down as "DEFAULT 'value'": there may be
# more than one whitespace around the "DEFAULT" keyword
# and it may also be lower case
# (see also http://tracker.firebirdsql.org/browse/CORE-356)
defexpr = row['fdefault'].lstrip()
assert defexpr[:8].rstrip().upper() == \
'DEFAULT', "Unrecognized default value: %s" % \
defexpr
defvalue = defexpr[8:].strip()
if defvalue == 'NULL':
# Redundant
defvalue = None
col_d = {
'name': name,
'type': coltype,
'nullable': not bool(row['null_flag']),
'default': defvalue,
'autoincrement': 'auto',
}
if orig_colname.lower() == orig_colname:
col_d['quote'] = True
# if the PK is a single field, try to see if its linked to
# a sequence thru a trigger
if len(pkey_cols) == 1 and name == pkey_cols[0]:
seq_d = self.get_column_sequence(connection, tablename, name)
if seq_d is not None:
col_d['sequence'] = seq_d
cols.append(col_d)
return cols
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# Query to extract the details of each UK/FK of the given table
fkqry = """
SELECT rc.rdb$constraint_name AS cname,
cse.rdb$field_name AS fname,
ix2.rdb$relation_name AS targetrname,
se.rdb$field_name AS targetfname
FROM rdb$relation_constraints rc
JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
JOIN rdb$index_segments cse ON
cse.rdb$index_name=ix1.rdb$index_name
JOIN rdb$index_segments se
ON se.rdb$index_name=ix2.rdb$index_name
AND se.rdb$field_position=cse.rdb$field_position
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
ORDER BY se.rdb$index_name, se.rdb$field_position
"""
tablename = self.denormalize_name(table_name)
c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
fks = util.defaultdict(lambda: {
'name': None,
'constrained_columns': [],
'referred_schema': None,
'referred_table': None,
'referred_columns': []
})
for row in c:
cname = self.normalize_name(row['cname'])
fk = fks[cname]
if not fk['name']:
fk['name'] = cname
fk['referred_table'] = self.normalize_name(row['targetrname'])
fk['constrained_columns'].append(
self.normalize_name(row['fname']))
fk['referred_columns'].append(
self.normalize_name(row['targetfname']))
return list(fks.values())
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
qry = """
SELECT ix.rdb$index_name AS index_name,
ix.rdb$unique_flag AS unique_flag,
ic.rdb$field_name AS field_name
FROM rdb$indices ix
JOIN rdb$index_segments ic
ON ix.rdb$index_name=ic.rdb$index_name
LEFT OUTER JOIN rdb$relation_constraints
ON rdb$relation_constraints.rdb$index_name =
ic.rdb$index_name
WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
AND rdb$relation_constraints.rdb$constraint_type IS NULL
ORDER BY index_name, ic.rdb$field_position
"""
c = connection.execute(qry, [self.denormalize_name(table_name)])
indexes = util.defaultdict(dict)
for row in c:
indexrec = indexes[row['index_name']]
if 'name' not in indexrec:
indexrec['name'] = self.normalize_name(row['index_name'])
indexrec['column_names'] = []
indexrec['unique'] = bool(row['unique_flag'])
indexrec['column_names'].append(
self.normalize_name(row['field_name']))
return list(indexes.values())
|
|
import os
import sys
import textwrap
import pytest
from tests.lib import (
assert_all_changes, pyversion, _create_test_package,
_change_test_package_version,
)
from tests.lib.local_repos import local_checkout
def test_no_upgrade_unless_requested(script):
"""
No upgrade if not specifically requested.
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', 'INITools', expect_error=True)
assert not result.files_created, (
'pip install INITools upgraded when it should not have'
)
@pytest.mark.network
def test_upgrade_to_specific_version(script):
"""
It does upgrade to specific version requested.
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', 'INITools==0.2', expect_error=True)
assert result.files_created, (
'pip install with specific version did not upgrade'
)
assert (
script.site_packages / 'INITools-0.1-py%s.egg-info' %
pyversion in result.files_deleted
)
assert (
script.site_packages / 'INITools-0.2-py%s.egg-info' %
pyversion in result.files_created
)
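# Illustrative sketch (assumption): the shell-level equivalent of the scenario
# exercised above.
#   pip install INITools==0.1
#   pip install INITools==0.2   # upgrades, replacing the 0.1 egg-info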
@pytest.mark.network
def test_upgrade_if_requested(script):
"""
And it does upgrade if requested.
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', '--upgrade', 'INITools', expect_error=True)
assert result.files_created, 'pip install --upgrade did not upgrade'
assert (
script.site_packages / 'INITools-0.1-py%s.egg-info' %
pyversion not in result.files_created
)
def test_upgrade_with_newest_already_installed(script, data):
"""
If the newest version of a package is already installed, the package should
not be reinstalled and the user should be informed.
"""
script.pip('install', '-f', data.find_links, '--no-index', 'simple')
result = script.pip(
'install', '--upgrade', '-f', data.find_links, '--no-index', 'simple'
)
assert not result.files_created, 'simple upgraded when it should not have'
assert 'already up-to-date' in result.stdout, result.stdout
@pytest.mark.network
def test_upgrade_force_reinstall_newest(script):
"""
Force reinstallation of a package even if it is already at its newest
version if --force-reinstall is supplied.
"""
result = script.pip('install', 'INITools')
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip(
'install', '--upgrade', '--force-reinstall', 'INITools'
)
assert result2.files_updated, 'upgrade to INITools 0.3 failed'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_uninstall_before_upgrade(script):
"""
Automatic uninstall-before-upgrade.
"""
result = script.pip('install', 'INITools==0.2', expect_error=True)
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip('install', 'INITools==0.3', expect_error=True)
assert result2.files_created, 'upgrade to INITools 0.3 failed'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_uninstall_before_upgrade_from_url(script):
"""
Automatic uninstall-before-upgrade from URL.
"""
result = script.pip('install', 'INITools==0.2', expect_error=True)
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip(
'install',
'http://pypi.python.org/packages/source/I/INITools/INITools-'
'0.3.tar.gz',
expect_error=True,
)
assert result2.files_created, 'upgrade to INITools 0.3 failed'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_upgrade_to_same_version_from_url(script):
"""
When installing from a URL the same version that is already installed, no
need to uninstall and reinstall if --upgrade is not specified.
"""
result = script.pip('install', 'INITools==0.3', expect_error=True)
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip(
'install',
'http://pypi.python.org/packages/source/I/INITools/INITools-'
'0.3.tar.gz',
expect_error=True,
)
assert not result2.files_updated, 'INITools 0.3 reinstalled same version'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_upgrade_from_reqs_file(script):
"""
Upgrade from a requirements file.
"""
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""\
PyLogo<0.4
# and something else to test out:
INITools==0.3
"""))
install_result = script.pip(
'install', '-r', script.scratch_path / 'test-req.txt'
)
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""\
PyLogo
# and something else to test out:
INITools
"""))
script.pip(
'install', '--upgrade', '-r', script.scratch_path / 'test-req.txt'
)
uninstall_result = script.pip(
'uninstall', '-r', script.scratch_path / 'test-req.txt', '-y'
)
assert_all_changes(
install_result,
uninstall_result,
[script.venv / 'build', 'cache', script.scratch / 'test-req.txt'],
)
def test_uninstall_rollback(script, data):
"""
Test uninstall-rollback (using test package with a setup.py
crafted to fail on install).
"""
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'broken==0.1'
)
assert script.site_packages / 'broken.py' in result.files_created, list(
result.files_created.keys()
)
result2 = script.pip(
'install', '-f', data.find_links, '--no-index', 'broken===0.2broken',
expect_error=True,
)
assert result2.returncode == 1, str(result2)
assert script.run(
'python', '-c', "import broken; print(broken.VERSION)"
).stdout == '0.1\n'
assert_all_changes(
result.files_after,
result2,
[script.venv / 'build'],
)
# Issue #530 - temporarily disable flaky test
@pytest.mark.skipif
def test_editable_git_upgrade(script):
"""
Test installing an editable git package from a repository, upgrading the
    repository, installing again, and checking that the newer version is used
"""
version_pkg_path = _create_test_package(script)
script.pip(
'install', '-e',
'%s#egg=version_pkg' % ('git+file://' + version_pkg_path),
)
version = script.run('version_pkg')
assert '0.1' in version.stdout
_change_test_package_version(script, version_pkg_path)
script.pip(
'install', '-e',
'%s#egg=version_pkg' % ('git+file://' + version_pkg_path),
)
version2 = script.run('version_pkg')
assert 'some different version' in version2.stdout, (
"Output: %s" % (version2.stdout)
)
@pytest.mark.network
def test_should_not_install_always_from_cache(script):
"""
    If there is an old cached package, pip should download the newer version.
Related to issue #175
"""
script.pip('install', 'INITools==0.2', expect_error=True)
script.pip('uninstall', '-y', 'INITools')
result = script.pip('install', 'INITools==0.1', expect_error=True)
assert (
script.site_packages / 'INITools-0.2-py%s.egg-info' %
pyversion not in result.files_created
)
assert (
script.site_packages / 'INITools-0.1-py%s.egg-info' %
pyversion in result.files_created
)
@pytest.mark.network
def test_install_with_ignoreinstalled_requested(script):
"""
Test old conflicting package is completely ignored
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', '-I', 'INITools==0.3', expect_error=True)
assert result.files_created, 'pip install -I did not install'
# both the old and new metadata should be present.
assert os.path.exists(
script.site_packages_path / 'INITools-0.1-py%s.egg-info' % pyversion
)
assert os.path.exists(
script.site_packages_path / 'INITools-0.3-py%s.egg-info' % pyversion
)
@pytest.mark.network
def test_upgrade_vcs_req_with_no_dists_found(script, tmpdir):
"""It can upgrade a VCS requirement that has no distributions otherwise."""
req = "%s#egg=pip-test-package" % local_checkout(
"git+http://github.com/pypa/pip-test-package.git",
tmpdir.join("cache"),
)
script.pip("install", req)
result = script.pip("install", "-U", req)
assert not result.returncode
@pytest.mark.network
def test_upgrade_vcs_req_with_dist_found(script):
"""It can upgrade a VCS requirement that has distributions on the index."""
# TODO(pnasrat) Using local_checkout fails on windows - oddness with the
# test path urls/git.
req = (
"%s#egg=pretend" %
(
"git+git://github.com/alex/pretend@e7f26ad7dbcb4a02a4995aade4"
"743aad47656b27"
)
)
script.pip("install", req, expect_stderr=True)
result = script.pip("install", "-U", req, expect_stderr=True)
assert "pypi.python.org" not in result.stdout, result.stdout
class TestUpgradeSetuptools(object):
"""
Tests for upgrading to setuptools (using pip from src tree)
The tests use a *fixed* set of packages from our test packages dir
note: virtualenv-1.9.1 contains distribute-0.6.34
note: virtualenv-1.10 contains setuptools-0.9.7
"""
def prep_ve(self, script, version, pip_src, distribute=False):
self.script = script
self.script.pip_install_local('virtualenv==%s' % version)
args = ['virtualenv', self.script.scratch_path / 'VE']
if distribute:
args.insert(1, '--distribute')
if version == "1.9.1" and not distribute:
# setuptools 0.6 didn't support PYTHONDONTWRITEBYTECODE
del self.script.environ["PYTHONDONTWRITEBYTECODE"]
self.script.run(*args)
if sys.platform == 'win32':
bindir = "Scripts"
else:
bindir = "bin"
self.ve_bin = self.script.scratch_path / 'VE' / bindir
self.script.run(self.ve_bin / 'pip', 'uninstall', '-y', 'pip')
self.script.run(
self.ve_bin / 'python', 'setup.py', 'install',
cwd=pip_src,
expect_stderr=True,
)
@pytest.mark.skipif("sys.version_info >= (3,0)")
def test_py2_from_setuptools_6_to_setuptools_7(
self, script, data, virtualenv):
self.prep_ve(script, '1.9.1', virtualenv.pip_source_dir)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-use-wheel', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
assert (
"Found existing installation: setuptools 0.6rc11" in result.stdout
)
result = self.script.run(self.ve_bin / 'pip', 'list')
assert "setuptools (0.9.8)" in result.stdout
def test_py2_py3_from_distribute_6_to_setuptools_7(
self, script, data, virtualenv):
self.prep_ve(
script, '1.9.1', virtualenv.pip_source_dir, distribute=True
)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
assert (
"Found existing installation: distribute 0.6.34" in result.stdout
)
result = self.script.run(self.ve_bin / 'pip', 'list')
assert "setuptools (0.9.8)" in result.stdout
assert "distribute (0.7.3)" not in result.stdout
def test_from_setuptools_7_to_setuptools_7(self, script, data, virtualenv):
self.prep_ve(script, '1.10', virtualenv.pip_source_dir)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
assert "Found existing installation: setuptools 0.9.7" in result.stdout
result = self.script.run(self.ve_bin / 'pip', 'list')
assert "setuptools (0.9.8)" in result.stdout
def test_from_setuptools_7_to_setuptools_7_using_wheel(
self, script, data, virtualenv):
self.prep_ve(script, '1.10', virtualenv.pip_source_dir)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--use-wheel', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
assert "Found existing installation: setuptools 0.9.7" in result.stdout
# only wheels use dist-info
assert 'setuptools-0.9.8.dist-info' in str(result.files_created)
result = self.script.run(self.ve_bin / 'pip', 'list')
assert "setuptools (0.9.8)" in result.stdout
# disabling intermittent travis failure:
# https://github.com/pypa/pip/issues/1379
@pytest.mark.skipif("hasattr(sys, 'pypy_version_info')")
def test_from_setuptools_7_to_setuptools_7_with_distribute_7_installed(
self, script, data, virtualenv):
self.prep_ve(
script, '1.9.1', virtualenv.pip_source_dir, distribute=True
)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-index',
'--find-links=%s' % data.find_links, 'setuptools==0.9.6'
)
result = self.script.run(self.ve_bin / 'pip', 'list')
assert "setuptools (0.9.6)" in result.stdout
assert "distribute (0.7.3)" not in result.stdout
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
assert "Found existing installation: setuptools 0.9.6" in result.stdout
result = self.script.run(self.ve_bin / 'pip', 'list')
assert "setuptools (0.9.8)" in result.stdout
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import with_statement
import hashlib
import logging
import os
import pipes
import random
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import textwrap
import time
import urllib2
import warnings
from datetime import datetime
from optparse import OptionParser
from sys import stderr
SPARK_EC2_VERSION = "1.3.1"
SPARK_EC2_DIR = os.path.dirname(os.path.realpath(__file__))
VALID_SPARK_VERSIONS = set([
"0.7.3",
"0.8.0",
"0.8.1",
"0.9.0",
"0.9.1",
"0.9.2",
"1.0.0",
"1.0.1",
"1.0.2",
"1.1.0",
"1.1.1",
"1.2.0",
"1.2.1",
"1.3.0",
"1.3.1",
])
DEFAULT_SPARK_VERSION = SPARK_EC2_VERSION
DEFAULT_SPARK_GITHUB_REPO = "https://github.com/apache/spark"
MESOS_SPARK_EC2_BRANCH = "branch-1.4"
# A URL prefix from which to fetch AMI information
AMI_PREFIX = "https://raw.github.com/mesos/spark-ec2/{b}/ami-list".format(b=MESOS_SPARK_EC2_BRANCH)
def setup_boto():
# Download Boto if it's not already present in the SPARK_EC2_DIR/lib folder:
version = "boto-2.34.0"
md5 = "5556223d2d0cc4d06dd4829e671dcecd"
url = "https://pypi.python.org/packages/source/b/boto/%s.tar.gz" % version
lib_dir = os.path.join(SPARK_EC2_DIR, "lib")
if not os.path.exists(lib_dir):
os.mkdir(lib_dir)
boto_lib_dir = os.path.join(lib_dir, version)
if not os.path.isdir(boto_lib_dir):
tgz_file_path = os.path.join(lib_dir, "%s.tar.gz" % version)
print "Downloading Boto from PyPi"
download_stream = urllib2.urlopen(url)
with open(tgz_file_path, "wb") as tgz_file:
tgz_file.write(download_stream.read())
        # read as bytes so the md5 check is unaffected by newline translation
        with open(tgz_file_path, "rb") as tar:
if hashlib.md5(tar.read()).hexdigest() != md5:
print >> stderr, "ERROR: Got wrong md5sum for Boto"
sys.exit(1)
tar = tarfile.open(tgz_file_path)
tar.extractall(path=lib_dir)
tar.close()
os.remove(tgz_file_path)
print "Finished downloading Boto"
sys.path.insert(0, boto_lib_dir)
setup_boto()
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
class UsageError(Exception):
pass
# Configure and parse our command-line arguments
def parse_args():
parser = OptionParser(
prog="spark-ec2",
version="%prog {v}".format(v=SPARK_EC2_VERSION),
usage="%prog [options] <action> <cluster_name>\n\n"
+ "<action> can be: launch, destroy, login, stop, start, get-master, reboot-slaves")
parser.add_option(
"-s", "--slaves", type="int", default=1,
help="Number of slaves to launch (default: %default)")
parser.add_option(
"-w", "--wait", type="int",
help="DEPRECATED (no longer necessary) - Seconds to wait for nodes to start")
parser.add_option(
"-k", "--key-pair",
help="Key pair to use on instances")
parser.add_option(
"-i", "--identity-file",
help="SSH private key file to use for logging into instances")
parser.add_option(
"-t", "--instance-type", default="m1.large",
help="Type of instance to launch (default: %default). " +
"WARNING: must be 64-bit; small instances won't work")
parser.add_option(
"-m", "--master-instance-type", default="",
help="Master instance type (leave empty for same as instance-type)")
parser.add_option(
"-r", "--region", default="us-east-1",
help="EC2 region zone to launch instances in")
parser.add_option(
"-z", "--zone", default="",
help="Availability zone to launch instances in, or 'all' to spread " +
"slaves across multiple (an additional $0.01/Gb for bandwidth" +
"between zones applies) (default: a single zone chosen at random)")
parser.add_option("-a", "--ami", help="Amazon Machine Image ID to use")
parser.add_option(
"-v", "--spark-version", default=DEFAULT_SPARK_VERSION,
help="Version of Spark to use: 'X.Y.Z' or a specific git hash (default: %default)")
parser.add_option(
"--spark-git-repo",
default=DEFAULT_SPARK_GITHUB_REPO,
help="Github repo from which to checkout supplied commit hash (default: %default)")
parser.add_option(
"--hadoop-major-version", default="1",
help="Major version of Hadoop (default: %default)")
parser.add_option(
"-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
"the given local address (for use with login)")
parser.add_option(
"--resume", action="store_true", default=False,
help="Resume installation on a previously launched cluster " +
"(for debugging)")
parser.add_option(
"--ebs-vol-size", metavar="SIZE", type="int", default=0,
help="Size (in GB) of each EBS volume.")
parser.add_option(
"--ebs-vol-type", default="standard",
help="EBS volume type (e.g. 'gp2', 'standard').")
parser.add_option(
"--ebs-vol-num", type="int", default=1,
help="Number of EBS volumes to attach to each node as /vol[x]. " +
"The volumes will be deleted when the instances terminate. " +
"Only possible on EBS-backed AMIs. " +
"EBS volumes are only attached if --ebs-vol-size > 0." +
"Only support up to 8 EBS volumes.")
parser.add_option("--placement-group", type="string", default=None,
help="Which placement group to try and launch " +
"instances into. Assumes placement group is already " +
"created.")
parser.add_option(
"--swap", metavar="SWAP", type="int", default=1024,
help="Swap space to set up per node, in MB (default: %default)")
parser.add_option(
"--spot-price", metavar="PRICE", type="float",
help="If specified, launch slaves as spot instances with the given " +
"maximum price (in dollars)")
parser.add_option(
"--ganglia", action="store_true", default=True,
help="Setup Ganglia monitoring on cluster (default: %default). NOTE: " +
"the Ganglia page will be publicly accessible")
parser.add_option(
"--no-ganglia", action="store_false", dest="ganglia",
help="Disable Ganglia monitoring for the cluster")
parser.add_option(
"-u", "--user", default="root",
help="The SSH user you want to connect as (default: %default)")
parser.add_option(
"--delete-groups", action="store_true", default=False,
help="When destroying a cluster, delete the security groups that were created")
parser.add_option(
"--use-existing-master", action="store_true", default=False,
help="Launch fresh slaves, but use an existing stopped master if possible")
parser.add_option(
"--worker-instances", type="int", default=1,
help="Number of instances per worker: variable SPARK_WORKER_INSTANCES (default: %default)")
parser.add_option(
"--master-opts", type="string", default="",
help="Extra options to give to master through SPARK_MASTER_OPTS variable " +
"(e.g -Dspark.worker.timeout=180)")
parser.add_option(
"--user-data", type="string", default="",
help="Path to a user-data file (most AMI's interpret this as an initialization script)")
parser.add_option(
"--authorized-address", type="string", default="0.0.0.0/0",
help="Address to authorize on created security groups (default: %default)")
parser.add_option(
"--additional-security-group", type="string", default="",
help="Additional security group to place the machines in")
parser.add_option(
"--copy-aws-credentials", action="store_true", default=False,
help="Add AWS credentials to hadoop configuration to allow Spark to access S3")
parser.add_option(
"--subnet-id", default=None, help="VPC subnet to launch instances in")
parser.add_option(
"--vpc-id", default=None, help="VPC to launch instances in")
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(1)
(action, cluster_name) = args
# Boto config check
# http://boto.cloudhackers.com/en/latest/boto_config_tut.html
home_dir = os.getenv('HOME')
if home_dir is None or not os.path.isfile(home_dir + '/.boto'):
if not os.path.isfile('/etc/boto.cfg'):
if os.getenv('AWS_ACCESS_KEY_ID') is None:
print >> stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " +
"must be set")
sys.exit(1)
if os.getenv('AWS_SECRET_ACCESS_KEY') is None:
print >> stderr, ("ERROR: The environment variable AWS_SECRET_ACCESS_KEY " +
"must be set")
sys.exit(1)
return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name, vpc_id):
groups = conn.get_all_security_groups()
group = [g for g in groups if g.name == name]
if len(group) > 0:
return group[0]
else:
print "Creating security group " + name
return conn.create_security_group(name, "Spark EC2 group", vpc_id)
def get_validate_spark_version(version, repo):
if "." in version:
version = version.replace("v", "")
if version not in VALID_SPARK_VERSIONS:
print >> stderr, "Don't know about Spark version: {v}".format(v=version)
sys.exit(1)
return version
else:
github_commit_url = "{repo}/commit/{commit_hash}".format(repo=repo, commit_hash=version)
request = urllib2.Request(github_commit_url)
request.get_method = lambda: 'HEAD'
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError, e:
print >> stderr, "Couldn't validate Spark commit: {url}".format(url=github_commit_url)
print >> stderr, "Received HTTP response code of {code}.".format(code=e.code)
sys.exit(1)
return version
# Check whether a given EC2 instance object is in a state we consider active,
# i.e. not terminating or terminated. We count both stopping and stopped as
# active since we can restart stopped clusters.
def is_active(instance):
return (instance.state in ['pending', 'running', 'stopping', 'stopped'])
# Attempt to resolve an appropriate AMI given the architecture and region of the request.
# Source: http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
# Last Updated: 2014-06-20
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
def get_spark_ami(opts):
instance_types = {
"c1.medium": "pvm",
"c1.xlarge": "pvm",
"c3.2xlarge": "pvm",
"c3.4xlarge": "pvm",
"c3.8xlarge": "pvm",
"c3.large": "pvm",
"c3.xlarge": "pvm",
"cc1.4xlarge": "hvm",
"cc2.8xlarge": "hvm",
"cg1.4xlarge": "hvm",
"cr1.8xlarge": "hvm",
"hi1.4xlarge": "pvm",
"hs1.8xlarge": "pvm",
"i2.2xlarge": "hvm",
"i2.4xlarge": "hvm",
"i2.8xlarge": "hvm",
"i2.xlarge": "hvm",
"m1.large": "pvm",
"m1.medium": "pvm",
"m1.small": "pvm",
"m1.xlarge": "pvm",
"m2.2xlarge": "pvm",
"m2.4xlarge": "pvm",
"m2.xlarge": "pvm",
"m3.2xlarge": "hvm",
"m3.large": "hvm",
"m3.medium": "hvm",
"m3.xlarge": "hvm",
"r3.2xlarge": "hvm",
"r3.4xlarge": "hvm",
"r3.8xlarge": "hvm",
"r3.large": "hvm",
"r3.xlarge": "hvm",
"t1.micro": "pvm",
"t2.medium": "hvm",
"t2.micro": "hvm",
"t2.small": "hvm",
}
if opts.instance_type in instance_types:
instance_type = instance_types[opts.instance_type]
else:
instance_type = "pvm"
print >> stderr,\
"Don't recognize %s, assuming type is pvm" % opts.instance_type
ami_path = "%s/%s/%s" % (AMI_PREFIX, opts.region, instance_type)
try:
ami = urllib2.urlopen(ami_path).read().strip()
print "Spark AMI: " + ami
except:
print >> stderr, "Could not resolve AMI at: " + ami_path
sys.exit(1)
return ami
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master and slaves
# Fails if there are already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
if opts.identity_file is None:
print >> stderr, "ERROR: Must provide an identity file (-i) for ssh connections."
sys.exit(1)
if opts.key_pair is None:
print >> stderr, "ERROR: Must provide a key pair name (-k) to use on instances."
sys.exit(1)
user_data_content = None
if opts.user_data:
with open(opts.user_data) as user_data_file:
user_data_content = user_data_file.read()
print "Setting up security groups..."
master_group = get_or_make_group(conn, cluster_name + "-master", opts.vpc_id)
slave_group = get_or_make_group(conn, cluster_name + "-slaves", opts.vpc_id)
authorized_address = opts.authorized_address
if master_group.rules == []: # Group was just now created
if opts.vpc_id is None:
master_group.authorize(src_group=master_group)
master_group.authorize(src_group=slave_group)
else:
master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=master_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=master_group)
master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=master_group)
master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=slave_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=slave_group)
master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=slave_group)
master_group.authorize('tcp', 22, 22, authorized_address)
master_group.authorize('tcp', 8080, 8081, authorized_address)
master_group.authorize('tcp', 18080, 18080, authorized_address)
master_group.authorize('tcp', 19999, 19999, authorized_address)
master_group.authorize('tcp', 50030, 50030, authorized_address)
master_group.authorize('tcp', 50070, 50070, authorized_address)
master_group.authorize('tcp', 60070, 60070, authorized_address)
master_group.authorize('tcp', 4040, 4045, authorized_address)
if opts.ganglia:
master_group.authorize('tcp', 5080, 5080, authorized_address)
if slave_group.rules == []: # Group was just now created
if opts.vpc_id is None:
slave_group.authorize(src_group=master_group)
slave_group.authorize(src_group=slave_group)
else:
slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=master_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=master_group)
slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=master_group)
slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=slave_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=slave_group)
slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=slave_group)
slave_group.authorize('tcp', 22, 22, authorized_address)
slave_group.authorize('tcp', 8080, 8081, authorized_address)
slave_group.authorize('tcp', 50060, 50060, authorized_address)
slave_group.authorize('tcp', 50075, 50075, authorized_address)
slave_group.authorize('tcp', 60060, 60060, authorized_address)
slave_group.authorize('tcp', 60075, 60075, authorized_address)
# Check if instances are already running in our groups
existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
die_on_error=False)
if existing_slaves or (existing_masters and not opts.use_existing_master):
print >> stderr, ("ERROR: There are already instances running in " +
"group %s or %s" % (master_group.name, slave_group.name))
sys.exit(1)
# Figure out Spark AMI
if opts.ami is None:
opts.ami = get_spark_ami(opts)
# we use group ids to work around https://github.com/boto/boto/issues/350
additional_group_ids = []
if opts.additional_security_group:
additional_group_ids = [sg.id
for sg in conn.get_all_security_groups()
if opts.additional_security_group in (sg.name, sg.id)]
print "Launching instances..."
try:
image = conn.get_all_images(image_ids=[opts.ami])[0]
except:
print >> stderr, "Could not find AMI " + opts.ami
sys.exit(1)
# Create block device mapping so that we can add EBS volumes if asked to.
# The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
block_map = BlockDeviceMapping()
if opts.ebs_vol_size > 0:
for i in range(opts.ebs_vol_num):
device = EBSBlockDeviceType()
device.size = opts.ebs_vol_size
device.volume_type = opts.ebs_vol_type
device.delete_on_termination = True
block_map["/dev/sd" + chr(ord('s') + i)] = device
# AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
if opts.instance_type.startswith('m3.'):
for i in range(get_num_disks(opts.instance_type)):
dev = BlockDeviceType()
dev.ephemeral_name = 'ephemeral%d' % i
# The first ephemeral drive is /dev/sdb.
name = '/dev/sd' + string.letters[i + 1]
block_map[name] = dev
# Launch slaves
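    # Spot path below: one spot request per zone, then poll every 10 seconds until
    # every request is active; on interrupt the outstanding requests are cancelled.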
if opts.spot_price is not None:
# Launch spot instances with the requested price
print ("Requesting %d slaves as spot instances with price $%.3f" %
(opts.slaves, opts.spot_price))
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
my_req_ids = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
slave_reqs = conn.request_spot_instances(
price=opts.spot_price,
image_id=opts.ami,
launch_group="launch-group-%s" % cluster_name,
placement=zone,
count=num_slaves_this_zone,
key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content)
my_req_ids += [req.id for req in slave_reqs]
i += 1
print "Waiting for spot instances to be granted..."
try:
while True:
time.sleep(10)
reqs = conn.get_all_spot_instance_requests()
id_to_req = {}
for r in reqs:
id_to_req[r.id] = r
active_instance_ids = []
for i in my_req_ids:
if i in id_to_req and id_to_req[i].state == "active":
active_instance_ids.append(id_to_req[i].instance_id)
if len(active_instance_ids) == opts.slaves:
print "All %d slaves granted" % opts.slaves
reservations = conn.get_all_reservations(active_instance_ids)
slave_nodes = []
for r in reservations:
slave_nodes += r.instances
break
else:
print "%d of %d slaves granted, waiting longer" % (
len(active_instance_ids), opts.slaves)
except:
print "Canceling spot instance requests"
conn.cancel_spot_instance_requests(my_req_ids)
# Log a warning if any of these requests actually launched instances:
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
running = len(master_nodes) + len(slave_nodes)
if running:
print >> stderr, ("WARNING: %d instances are still running" % running)
sys.exit(0)
else:
# Launch non-spot instances
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
slave_nodes = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
if num_slaves_this_zone > 0:
slave_res = image.run(key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
placement=zone,
min_count=num_slaves_this_zone,
max_count=num_slaves_this_zone,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content)
slave_nodes += slave_res.instances
print "Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone,
zone, slave_res.id)
i += 1
# Launch or resume masters
if existing_masters:
print "Starting master..."
for inst in existing_masters:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
master_nodes = existing_masters
else:
master_type = opts.master_instance_type
if master_type == "":
master_type = opts.instance_type
if opts.zone == 'all':
opts.zone = random.choice(conn.get_all_zones()).name
master_res = image.run(key_name=opts.key_pair,
security_group_ids=[master_group.id] + additional_group_ids,
instance_type=master_type,
placement=opts.zone,
min_count=1,
max_count=1,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content)
master_nodes = master_res.instances
print "Launched master in %s, regid = %s" % (zone, master_res.id)
# This wait time corresponds to SPARK-4983
print "Waiting for AWS to propagate instance metadata..."
time.sleep(5)
# Give the instances descriptive names
for master in master_nodes:
master.add_tag(
key='Name',
value='{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id))
for slave in slave_nodes:
slave.add_tag(
key='Name',
value='{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id))
# Return all the instances
return (master_nodes, slave_nodes)
# Get the EC2 instances in an existing cluster if available.
# Returns a tuple of lists of EC2 instance objects for the masters and slaves
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
print "Searching for existing cluster " + cluster_name + "..."
reservations = conn.get_all_reservations()
master_nodes = []
slave_nodes = []
for res in reservations:
active = [i for i in res.instances if is_active(i)]
for inst in active:
group_names = [g.name for g in inst.groups]
if (cluster_name + "-master") in group_names:
master_nodes.append(inst)
elif (cluster_name + "-slaves") in group_names:
slave_nodes.append(inst)
if any((master_nodes, slave_nodes)):
print "Found %d master(s), %d slaves" % (len(master_nodes), len(slave_nodes))
if master_nodes != [] or not die_on_error:
return (master_nodes, slave_nodes)
else:
if master_nodes == [] and slave_nodes != []:
print >> sys.stderr, "ERROR: Could not find master in group " + cluster_name + "-master"
else:
print >> sys.stderr, "ERROR: Could not find any existing cluster"
sys.exit(1)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
master = master_nodes[0].public_dns_name
if deploy_ssh_key:
print "Generating cluster's SSH key on master..."
key_setup = """
[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
"""
ssh(master, opts, key_setup)
dot_ssh_tar = ssh_read(master, opts, ['tar', 'c', '.ssh'])
print "Transferring cluster's SSH key to slaves..."
for slave in slave_nodes:
print slave.public_dns_name
ssh_write(slave.public_dns_name, opts, ['tar', 'x'], dot_ssh_tar)
modules = ['spark', 'ephemeral-hdfs', 'persistent-hdfs',
'mapreduce', 'spark-standalone', 'tachyon', 'mahout']
if opts.hadoop_major_version == "1":
modules = filter(lambda x: x != "mapreduce", modules)
if opts.ganglia:
modules.append('ganglia')
# NOTE: We should clone the repository before running deploy_files to
# prevent ec2-variables.sh from being overwritten
ssh(
host=master,
opts=opts,
command="rm -rf spark-ec2"
+ " && "
+ "git clone https://github.com/Verdad/spark-ec2.git -b {b}".format(b=MESOS_SPARK_EC2_BRANCH)
)
print "Deploying files to master..."
deploy_files(
conn=conn,
root_dir=SPARK_EC2_DIR + "/" + "deploy.generic",
opts=opts,
master_nodes=master_nodes,
slave_nodes=slave_nodes,
modules=modules
)
print "Running setup on master..."
setup_spark_cluster(master, opts)
print "Done!"
def setup_spark_cluster(master, opts):
ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
ssh(master, opts, "spark-ec2/setup.sh")
print "Spark standalone cluster started at http://%s:8080" % master
if opts.ganglia:
print "Ganglia started at http://%s:5080/ganglia" % master
def is_ssh_available(host, opts, print_ssh_output=True):
"""
Check if SSH is available on a host.
"""
s = subprocess.Popen(
ssh_command(opts) + ['-t', '-t', '-o', 'ConnectTimeout=3',
'%s@%s' % (opts.user, host), stringify_command('true')],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT # we pipe stderr through stdout to preserve output order
)
cmd_output = s.communicate()[0] # [1] is stderr, which we redirected to stdout
if s.returncode != 0 and print_ssh_output:
# extra leading newline is for spacing in wait_for_cluster_state()
print textwrap.dedent("""\n
Warning: SSH connection error. (This could be temporary.)
Host: {h}
SSH return code: {r}
SSH output: {o}
""").format(
h=host,
r=s.returncode,
o=cmd_output.strip()
)
return s.returncode == 0
def is_cluster_ssh_available(cluster_instances, opts):
"""
Check if SSH is available on all the instances in a cluster.
"""
for i in cluster_instances:
if not is_ssh_available(host=i.public_dns_name, opts=opts):
return False
else:
return True
def wait_for_cluster_state(conn, opts, cluster_instances, cluster_state):
"""
Wait for all the instances in the cluster to reach a designated state.
cluster_instances: a list of boto.ec2.instance.Instance
cluster_state: a string representing the desired state of all the instances in the cluster
value can be 'ssh-ready' or a valid value from boto.ec2.instance.InstanceState such as
'running', 'terminated', etc.
(would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250)
"""
sys.stdout.write(
"Waiting for cluster to enter '{s}' state.".format(s=cluster_state)
)
sys.stdout.flush()
start_time = datetime.now()
num_attempts = 0
while True:
time.sleep(5 * num_attempts) # seconds
for i in cluster_instances:
i.update()
statuses = conn.get_all_instance_status(instance_ids=[i.id for i in cluster_instances])
if cluster_state == 'ssh-ready':
if all(i.state == 'running' for i in cluster_instances) and \
all(s.system_status.status == 'ok' for s in statuses) and \
all(s.instance_status.status == 'ok' for s in statuses) and \
is_cluster_ssh_available(cluster_instances, opts):
break
else:
if all(i.state == cluster_state for i in cluster_instances):
break
num_attempts += 1
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("\n")
end_time = datetime.now()
print "Cluster is now in '{s}' state. Waited {t} seconds.".format(
s=cluster_state,
t=(end_time - start_time).seconds
)
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
# Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
# Last Updated: 2014-06-20
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
disks_by_instance = {
"c1.medium": 1,
"c1.xlarge": 4,
"c3.2xlarge": 2,
"c3.4xlarge": 2,
"c3.8xlarge": 2,
"c3.large": 2,
"c3.xlarge": 2,
"cc1.4xlarge": 2,
"cc2.8xlarge": 4,
"cg1.4xlarge": 2,
"cr1.8xlarge": 2,
"g2.2xlarge": 1,
"hi1.4xlarge": 2,
"hs1.8xlarge": 24,
"i2.2xlarge": 2,
"i2.4xlarge": 4,
"i2.8xlarge": 8,
"i2.xlarge": 1,
"m1.large": 2,
"m1.medium": 1,
"m1.small": 1,
"m1.xlarge": 4,
"m2.2xlarge": 1,
"m2.4xlarge": 2,
"m2.xlarge": 1,
"m3.2xlarge": 2,
"m3.large": 1,
"m3.medium": 1,
"m3.xlarge": 2,
"r3.2xlarge": 1,
"r3.4xlarge": 1,
"r3.8xlarge": 2,
"r3.large": 1,
"r3.xlarge": 1,
"t1.micro": 0,
}
if instance_type in disks_by_instance:
return disks_by_instance[instance_type]
else:
print >> stderr, ("WARNING: Don't know number of disks on instance type %s; assuming 1"
% instance_type)
return 1
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
#
# root_dir should be an absolute path to the directory with the files we want to deploy.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
active_master = master_nodes[0].public_dns_name
num_disks = get_num_disks(opts.instance_type)
hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
mapred_local_dirs = "/mnt/hadoop/mrlocal"
spark_local_dirs = "/mnt/spark"
if num_disks > 1:
for i in range(2, num_disks + 1):
hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
spark_local_dirs += ",/mnt%d/spark" % i
cluster_url = "%s:7077" % active_master
if "." in opts.spark_version:
# Pre-built Spark deploy
spark_v = get_validate_spark_version(opts.spark_version, opts.spark_git_repo)
else:
# Spark-only custom deploy
spark_v = "%s|%s" % (opts.spark_git_repo, opts.spark_version)
template_vars = {
"master_list": '\n'.join([i.public_dns_name for i in master_nodes]),
"active_master": active_master,
"slave_list": '\n'.join([i.public_dns_name for i in slave_nodes]),
"cluster_url": cluster_url,
"hdfs_data_dirs": hdfs_data_dirs,
"mapred_local_dirs": mapred_local_dirs,
"spark_local_dirs": spark_local_dirs,
"swap": str(opts.swap),
"modules": '\n'.join(modules),
"spark_version": spark_v,
"hadoop_major_version": opts.hadoop_major_version,
"spark_worker_instances": "%d" % opts.worker_instances,
"spark_master_opts": opts.master_opts,
"emr_cluster_id":"j-28IXBFC4AEQ29",
"emr_days_to_process":"1",
"aws_default_region":opts.region
}
if opts.copy_aws_credentials:
template_vars["aws_access_key_id"] = conn.aws_access_key_id
template_vars["aws_secret_access_key"] = conn.aws_secret_access_key
else:
template_vars["aws_access_key_id"] = ""
template_vars["aws_secret_access_key"] = ""
# Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
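    # Tokens of the form "{{active_master}}" or "{{spark_version}}" in the
    # template files are replaced with the matching template_vars values.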
tmp_dir = tempfile.mkdtemp()
for path, dirs, files in os.walk(root_dir):
if path.find(".svn") == -1:
dest_dir = os.path.join('/', path[len(root_dir):])
local_dir = tmp_dir + dest_dir
if not os.path.exists(local_dir):
os.makedirs(local_dir)
for filename in files:
if filename[0] not in '#.~' and filename[-1] != '~':
dest_file = os.path.join(dest_dir, filename)
local_file = tmp_dir + dest_file
with open(os.path.join(path, filename)) as src:
with open(local_file, "w") as dest:
text = src.read()
for key in template_vars:
text = text.replace("{{" + key + "}}", template_vars[key])
dest.write(text)
dest.close()
# rsync the whole directory over to the master machine
command = [
'rsync', '-rv',
'-e', stringify_command(ssh_command(opts)),
"%s/" % tmp_dir,
"%s@%s:/" % (opts.user, active_master)
]
subprocess.check_call(command)
# Remove the temp directory we created above
shutil.rmtree(tmp_dir)
def stringify_command(parts):
if isinstance(parts, str):
return parts
else:
return ' '.join(map(pipes.quote, parts))
def ssh_args(opts):
parts = ['-o', 'StrictHostKeyChecking=no']
parts += ['-o', 'UserKnownHostsFile=/dev/null']
if opts.identity_file is not None:
parts += ['-i', opts.identity_file]
return parts
def ssh_command(opts):
return ['ssh'] + ssh_args(opts)
# Run a command on a host through ssh, retrying up to five times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
tries = 0
while True:
try:
return subprocess.check_call(
ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host),
stringify_command(command)])
except subprocess.CalledProcessError as e:
if tries > 5:
# If this was an ssh failure, provide the user with hints.
if e.returncode == 255:
raise UsageError(
"Failed to SSH to remote host {0}.\n" +
"Please check that you have provided the correct --identity-file and " +
"--key-pair parameters and try again.".format(host))
else:
raise e
print >> stderr, \
"Error executing remote command, retrying after 30 seconds: {0}".format(e)
time.sleep(30)
tries = tries + 1
# Backported from Python 2.7 for compatibility with 2.6 (See SPARK-1990)
def _check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def ssh_read(host, opts, command):
return _check_output(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)])
def ssh_write(host, opts, command, arguments):
tries = 0
while True:
proc = subprocess.Popen(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)],
stdin=subprocess.PIPE)
proc.stdin.write(arguments)
proc.stdin.close()
status = proc.wait()
if status == 0:
break
elif tries > 5:
raise RuntimeError("ssh_write failed with error %s" % proc.returncode)
else:
print >> stderr, \
"Error {0} while executing remote command, retrying after 30 seconds".format(status)
time.sleep(30)
tries = tries + 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
if opts.zone == 'all':
zones = [z.name for z in conn.get_all_zones()]
else:
zones = [opts.zone]
return zones
# Gets the number of items in a partition
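# e.g. 10 slaves over 3 zones gives partitions of 4, 3 and 3: the remainder
# (total % num_partitions) is spread across the first zones.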
def get_partition(total, num_partitions, current_partitions):
num_slaves_this_zone = total / num_partitions
if (total % num_partitions) - current_partitions > 0:
num_slaves_this_zone += 1
return num_slaves_this_zone
def real_main():
(opts, action, cluster_name) = parse_args()
# Input parameter validation
get_validate_spark_version(opts.spark_version, opts.spark_git_repo)
if opts.wait is not None:
# NOTE: DeprecationWarnings are silent in 2.7+ by default.
# To show them, run Python with the -Wdefault switch.
# See: https://docs.python.org/3.5/whatsnew/2.7.html
warnings.warn(
"This option is deprecated and has no effect. "
"spark-ec2 automatically waits as long as necessary for clusters to start up.",
DeprecationWarning
)
if opts.ebs_vol_num > 8:
print >> stderr, "ebs-vol-num cannot be greater than 8"
sys.exit(1)
try:
conn = ec2.connect_to_region(opts.region)
except Exception as e:
print >> stderr, (e)
sys.exit(1)
# Select an AZ at random if it was not specified.
if opts.zone == "":
opts.zone = random.choice(conn.get_all_zones()).name
if action == "launch":
if opts.slaves <= 0:
print >> sys.stderr, "ERROR: You have to start at least 1 slave"
sys.exit(1)
if opts.resume:
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
else:
(master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready'
)
setup_cluster(conn, master_nodes, slave_nodes, opts, True)
elif action == "destroy":
print "Are you sure you want to destroy the cluster %s?" % cluster_name
print "The following instances will be terminated:"
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
for inst in master_nodes + slave_nodes:
print "> %s" % inst.public_dns_name
msg = "ALL DATA ON ALL NODES WILL BE LOST!!\nDestroy cluster %s (y/N): " % cluster_name
response = raw_input(msg)
if response == "y":
print "Terminating master..."
for inst in master_nodes:
inst.terminate()
print "Terminating slaves..."
for inst in slave_nodes:
inst.terminate()
# Delete security groups as well
if opts.delete_groups:
print "Deleting security groups (this will take some time)..."
group_names = [cluster_name + "-master", cluster_name + "-slaves"]
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='terminated'
)
attempt = 1
while attempt <= 3:
print "Attempt %d" % attempt
groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
success = True
# Delete individual rules in all groups before deleting groups to
# remove dependencies between them
for group in groups:
print "Deleting rules in security group " + group.name
for rule in group.rules:
for grant in rule.grants:
success &= group.revoke(ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group=grant)
# Sleep for AWS eventual-consistency to catch up, and for instances
# to terminate
time.sleep(30) # Yes, it does have to be this long :-(
for group in groups:
try:
                            # Use group_id rather than the name so deletion also works for VPC security groups
conn.delete_security_group(group_id=group.id)
print "Deleted security group %s" % group.name
except boto.exception.EC2ResponseError:
success = False
print "Failed to delete security group %s" % group.name
# Unfortunately, group.revoke() returns True even if a rule was not
# deleted, so this needs to be rerun if something fails
if success:
break
attempt += 1
if not success:
print "Failed to delete all security groups after 3 tries."
print "Try re-running in a few minutes."
elif action == "login":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
master = master_nodes[0].public_dns_name
print "Logging into master " + master + "..."
proxy_opt = []
if opts.proxy_port is not None:
proxy_opt = ['-D', opts.proxy_port]
subprocess.check_call(
ssh_command(opts) + proxy_opt + ['-t', '-t', "%s@%s" % (opts.user, master)])
elif action == "reboot-slaves":
response = raw_input(
"Are you sure you want to reboot the cluster " +
cluster_name + " slaves?\n" +
"Reboot cluster slaves " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print "Rebooting slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
print "Rebooting " + inst.id
inst.reboot()
elif action == "get-master":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
print master_nodes[0].public_dns_name
elif action == "stop":
response = raw_input(
"Are you sure you want to stop the cluster " +
cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
"BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
"AMAZON EBS IF IT IS EBS-BACKED!!\n" +
"All data on spot-instance slaves will be lost.\n" +
"Stop cluster " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print "Stopping master..."
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.stop()
print "Stopping slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
if inst.spot_instance_request_id:
inst.terminate()
else:
inst.stop()
elif action == "start":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
print "Starting slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
print "Starting master..."
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready'
)
setup_cluster(conn, master_nodes, slave_nodes, opts, False)
else:
print >> stderr, "Invalid action: %s" % action
sys.exit(1)
def main():
try:
real_main()
except UsageError, e:
print >> stderr, "\nError:\n", e
sys.exit(1)
if __name__ == "__main__":
logging.basicConfig()
main()
|
|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest
import datetime
from dateutil import tz as tzutil
from c7n.resources.dynamodb import DeleteTable
from c7n.executor import MainThreadExecutor
class DynamodbTest(BaseTest):
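    # These tests replay previously recorded AWS API responses ("flight data"),
    # so they exercise the policies offline against captured fixtures.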
def test_resources(self):
session_factory = self.replay_flight_data("test_dynamodb_table")
p = self.load_policy(
{"name": "tables", "resource": "dynamodb-table"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["TableName"], "test-table-kms-filter")
self.assertEqual(resources[0]["TableStatus"], "ACTIVE")
def test_invoke_action(self):
session_factory = self.replay_flight_data("test_dynamodb_invoke_action")
p = self.load_policy(
{
"name": "tables",
"resource": "dynamodb-table",
"actions": [{"type": "invoke-lambda", "function": "process_resources"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_delete_tables(self):
session_factory = self.replay_flight_data("test_dynamodb_delete_table")
self.patch(DeleteTable, "executor_factory", MainThreadExecutor)
p = self.load_policy(
{
"name": "delete-empty-tables",
"resource": "dynamodb-table",
"filters": [{"TableSizeBytes": 0}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(resources[0]["TableName"], "c7n.DynamoDB.01")
def test_tag_filter(self):
session_factory = self.replay_flight_data("test_dynamodb_tag_filter")
client = session_factory().client("dynamodb")
p = self.load_policy(
{
"name": "dynamodb-tag-filters",
"resource": "dynamodb-table",
"filters": [{"tag:test_key": "test_value"}],
},
session_factory=session_factory,
config={'region': 'us-west-2', 'account_id': '644160558196'})
resources = p.run()
self.assertEqual(len(resources), 1)
arn = resources[0]["TableArn"]
tags = client.list_tags_of_resource(ResourceArn=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["Tags"]}
self.assertTrue("test_key" in tag_map)
def test_kms_key_filter(self):
session_factory = self.replay_flight_data("test_dynamodb_kms_key_filter")
p = self.load_policy(
{
"name": "dynamodb-kms-key-filters",
"resource": "dynamodb-table",
"filters": [
{
"type": "kms-key",
"key": "c7n:AliasName",
"value": "^(alias/aws/dynamodb)",
"op": "regex"
}
]
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["TableName"], "test-table-kms-filter")
def test_dynamodb_mark(self):
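        # mark-for-op writes a tag whose value ends in "<op>@YYYY/MM/DD"; the test
        # parses that date back out and compares it to the recorded run date
        # (with "days": 0 the op date equals the day the policy ran).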
session_factory = self.replay_flight_data("test_dynamodb_mark")
client = session_factory().client("dynamodb")
p = self.load_policy(
{
"name": "dynamodb-mark",
"resource": "dynamodb-table",
"filters": [
{"TableName": "c7n-test"},
{'tag:test_tag': 'absent'}
],
"actions": [
{
"type": "mark-for-op",
"days": 0,
"op": "delete",
"tag": "test_tag",
}
],
},
session_factory=session_factory,
)
resources = p.run()
arn = resources[0]["TableArn"]
self.assertEqual(len(resources), 1)
tags = client.list_tags_of_resource(ResourceArn=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["Tags"]}
localtz = tzutil.gettz("America/New_York")
dt = datetime.datetime.now(localtz)
dt = dt.replace(year=2018, month=6, day=8, hour=7, minute=00)
result = datetime.datetime.strptime(
tag_map["test_tag"].strip().split("@", 1)[-1], "%Y/%m/%d"
).replace(
tzinfo=localtz
)
self.assertEqual(result.date(), dt.date())
def test_dynamodb_tag(self):
session_factory = self.replay_flight_data("test_dynamodb_tag")
client = session_factory().client("dynamodb")
p = self.load_policy(
{
"name": "dynamodb-tag-table",
"resource": "dynamodb-table",
"filters": [{"TableName": "rolltop"}],
"actions": [{"type": "tag", "tags": {"new_tag_key": "new_tag_value"}}],
},
session_factory=session_factory,
)
resources = p.run()
arn = resources[0]["TableArn"]
tags = client.list_tags_of_resource(ResourceArn=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["Tags"]}
self.assertEqual(
{"test_key": "test_value", "new_tag_key": "new_tag_value"}, tag_map
)
def test_dynamodb_unmark(self):
session_factory = self.replay_flight_data("test_dynamodb_unmark")
client = session_factory().client("dynamodb")
p = self.load_policy(
{
"name": "dynamodb-unmark",
"resource": "dynamodb-table",
"filters": [{"TableName": "rolltop"}],
"actions": [{"type": "remove-tag", "tags": ["test_key"]}],
},
session_factory=session_factory,
)
resources = p.run()
arn = resources[0]["TableArn"]
self.assertEqual(len(resources), 1)
tags = client.list_tags_of_resource(ResourceArn=arn)
self.assertFalse("test_key" in tags)
def test_dynamodb_create_backup(self):
dt = datetime.datetime.now().replace(
year=2018, month=1, day=16, hour=19, minute=39
)
suffix = dt.strftime("%Y-%m-%d-%H-%M")
session_factory = self.replay_flight_data("test_dynamodb_create_backup")
p = self.load_policy(
{
"name": "c7n-dynamodb-create-backup",
"resource": "dynamodb-table",
"filters": [{"TableName": "c7n-dynamodb-backup"}],
"actions": [{"type": "backup"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("dynamodb")
arn = resources[0]["c7n:BackupArn"]
table = client.describe_backup(BackupArn=arn)
self.assertEqual(
table["BackupDescription"]["BackupDetails"]["BackupName"],
"Backup-c7n-dynamodb-backup-%s" % (suffix),
)
def test_dynamodb_create_prefixed_backup(self):
dt = datetime.datetime.now().replace(
year=2018, month=1, day=22, hour=13, minute=42
)
suffix = dt.strftime("%Y-%m-%d-%H-%M")
session_factory = self.replay_flight_data(
"test_dynamodb_create_prefixed_backup"
)
p = self.load_policy(
{
"name": "c7n-dynamodb-create-prefixed-backup",
"resource": "dynamodb-table",
"filters": [{"TableName": "c7n-dynamodb-backup"}],
"actions": [{"type": "backup", "prefix": "custom"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("dynamodb")
arn = resources[0]["c7n:BackupArn"]
table = client.describe_backup(BackupArn=arn)
self.assertEqual(
table["BackupDescription"]["BackupDetails"]["BackupName"],
"custom-c7n-dynamodb-backup-%s" % (suffix),
)
def test_dynamodb_delete_backup(self):
factory = self.replay_flight_data("test_dynamodb_delete_backup")
p = self.load_policy(
{
"name": "c7n-dynamodb-delete-backup",
"resource": "dynamodb-backup",
"filters": [{"TableName": "omnissm-registrations"}],
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_dynamodb_enable_stream(self):
factory = self.replay_flight_data("test_dynamodb_enable_stream")
p = self.load_policy(
{
"name": "c7n-dynamodb-enable-stream",
"resource": "dynamodb-table",
"filters": [{"TableName": "c7n-test"}, {"TableStatus": "ACTIVE"}],
"actions": [
{
"type": "set-stream",
"state": True,
"stream_view_type": "NEW_IMAGE",
}
],
},
session_factory=factory,
)
resources = p.run()
stream_field = resources[0]["c7n:StreamState"]
stream_type = resources[0]["c7n:StreamType"]
self.assertEqual(len(resources), 1)
self.assertTrue(stream_field)
self.assertEqual("NEW_IMAGE", stream_type)
class DynamoDbAccelerator(BaseTest):
def test_resources(self):
session_factory = self.replay_flight_data("test_dax_resources")
p = self.load_policy(
{"name": "dax-resources", "resource": "dax"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["Status"], "available")
def test_dax_security_group(self):
session_factory = self.replay_flight_data("test_dax_security_group_filter")
p = self.load_policy(
{
"name": "dax-resources",
"resource": "dax",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["ClusterName"], "c7n-dax")
def test_tagging(self):
session_factory = self.replay_flight_data("test_dax_add_tags")
p = self.load_policy(
{
"name": "dax-resources",
"resource": "dax",
"filters": [{"tag:Required": "absent"}],
"actions": [{"type": "tag", "tags": {"Required": "Required"}}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["ClusterName"], "c7n-dax")
client = session_factory(region="us-east-1").client("dax")
tags = client.list_tags(ResourceName=resources[0]["ClusterArn"])["Tags"]
self.assertEqual(tags[0]["Value"], "Required")
def test_remove_tagging(self):
session_factory = self.replay_flight_data("test_dax_remove_tags")
p = self.load_policy(
{
"name": "dax-resources",
"resource": "dax",
"filters": [{"tag:Required": "present"}],
"actions": [{"type": "remove-tag", "tags": ["Required"]}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["ClusterName"], "c7n-dax")
client = session_factory(region="us-east-1").client("dax")
tags = client.list_tags(ResourceName=resources[0]["ClusterArn"])["Tags"]
self.assertEqual(tags, [{"Key": "Name", "Value": "c7n-dax-test"}])
def test_mark_for_op(self):
session_factory = self.replay_flight_data("test_dax_mark_for_op")
p = self.load_policy(
{
"name": "dax-resources",
"resource": "dax",
"filters": [
{"tag:custodian_cleanup": "absent"}, {"tag:Required": "absent"}
],
"actions": [
{
"type": "mark-for-op",
"tag": "custodian_cleanup",
"op": "delete",
"days": 7,
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["ClusterName"], "c7n-dax")
client = session_factory(region="us-east-1").client("dax")
tags = client.list_tags(ResourceName=resources[0]["ClusterArn"])["Tags"]
self.assertEqual(tags[0]["Key"], "custodian_cleanup")
self.assertEqual(
tags[0]["Value"], "Resource does not meet policy: delete@2018/05/15"
)
def test_delete(self):
session_factory = self.replay_flight_data("test_dax_delete_cluster")
p = self.load_policy(
{
"name": "dax-resources",
"resource": "dax",
"filters": [{"tag:Required": "absent"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory(region='us-east-1').client('dax')
clusters = client.describe_clusters()['Clusters']
self.assertEqual(clusters[0]['Status'], 'deleting')
def test_update_cluster(self):
session_factory = self.replay_flight_data(
'test_dax_update_cluster')
p = self.load_policy({
'name': 'dax-resources',
'resource': 'dax',
'filters': [{
'ParameterGroup.ParameterGroupName': 'default.dax1.0'}],
'actions': [{
'type': 'update-cluster',
'ParameterGroupName': 'testparamgroup'}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['ClusterName'], 'c7n-dax')
client = session_factory(region='us-east-1').client('dax')
clusters = client.describe_clusters()['Clusters']
self.assertEqual(clusters[0]['ParameterGroup']['ParameterGroupName'],
'testparamgroup')
def test_modify_security_groups(self):
session_factory = self.replay_flight_data(
'test_dax_update_security_groups')
p = self.load_policy({
'name': 'dax-resources',
'resource': 'dax',
'filters': [{
'type': 'security-group',
'key': 'GroupName',
'value': 'default'}],
'actions': [{
'type': 'modify-security-groups',
'remove': 'matched',
'add': 'sg-72916c3b'}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['ClusterName'], 'c7n-dax')
client = session_factory(region='us-east-1').client('dax')
sgs = client.describe_clusters()['Clusters'][0]['SecurityGroups']
self.assertDictEqual(sgs[0], {"Status": "adding",
"SecurityGroupIdentifier": "sg-72916c3b"})
self.assertDictEqual(sgs[1], {"Status": "removing",
"SecurityGroupIdentifier": "sg-4b9ada34"})
def test_subnet_group_filter(self):
session_factory = self.replay_flight_data(
"test_dax_subnet_group_filter")
p = self.load_policy({
"name": "dax-cluster",
"resource": "dax",
"filters": [{
"type": "subnet",
"key": "MapPublicIpOnLaunch",
"value": False}]}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['ClusterName'], 'c7n-test')
def test_dax_get_resource(self):
session_factory = self.replay_flight_data('test_dax_get_resource')
p = self.load_policy({
'name': 'dax-cluster-gr', 'resource': 'dax'},
session_factory=session_factory)
resources = p.resource_manager.get_resources(
["c7n-test-cluster"])
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['TotalNodes'], 1)
|
|
from sympy.integrals.transforms import (mellin_transform,
inverse_mellin_transform, laplace_transform, inverse_laplace_transform,
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform,
cosine_transform, inverse_cosine_transform,
hankel_transform, inverse_hankel_transform,
LaplaceTransform, FourierTransform, SineTransform, CosineTransform,
InverseLaplaceTransform, InverseFourierTransform, InverseSineTransform, InverseCosineTransform,
HankelTransform, InverseHankelTransform)
from sympy import (
gamma, exp, oo, Heaviside, symbols, Symbol, re, factorial, pi,
cos, S, And, sin, sqrt, I, log, tan, hyperexpand, meijerg,
EulerGamma, erf, besselj, bessely, besseli, besselk,
exp_polar, polar_lift, unpolarify, Function, expint)
from sympy.utilities.pytest import XFAIL, slow, skip
from sympy.abc import x, s, a, b
nu, beta, rho = symbols('nu beta rho')
def test_undefined_function():
from sympy import Function, MellinTransform
f = Function('f')
assert mellin_transform(f(x), x, s) == MellinTransform(f(x), x, s)
assert mellin_transform(f(x) + exp(-x), x, s) == \
(MellinTransform(f(x), x, s) + gamma(s), (0, oo), True)
assert laplace_transform(2*f(x), x, s) == 2*LaplaceTransform(f(x), x, s)
# TODO test derivative and other rules when implemented
def test_free_symbols():
from sympy import Function
f = Function('f')
assert mellin_transform(f(x), x, s).free_symbols == set([s])
assert mellin_transform(f(x)*a, x, s).free_symbols == set([s, a])
def test_as_integral():
from sympy import Function, Integral
f = Function('f')
assert mellin_transform(f(x), x, s).rewrite('Integral') == \
Integral(x**(s - 1)*f(x), (x, 0, oo))
assert fourier_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-2*I*pi*s*x), (x, -oo, oo))
assert laplace_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-s*x), (x, 0, oo))
assert str(inverse_mellin_transform(f(s), s, x, (a, b)).rewrite('Integral')) \
== "Integral(x**(-s)*f(s), (s, _c - oo*I, _c + oo*I))"
assert str(inverse_laplace_transform(f(s), s, x).rewrite('Integral')) == \
"Integral(f(s)*exp(s*x), (s, _c - oo*I, _c + oo*I))"
assert inverse_fourier_transform(f(s), s, x).rewrite('Integral') == \
Integral(f(s)*exp(2*I*pi*s*x), (s, -oo, oo))
# NOTE this is stuck in risch because meijerint cannot handle it
@slow
@XFAIL
def test_mellin_transform_fail():
skip("Risch takes forever.")
from sympy import Max, Min
MT = mellin_transform
bpos = symbols('b', positive=True)
bneg = symbols('b', negative=True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
# TODO does not work with bneg, argument wrong. Needs changes to matching.
assert MT(expr.subs(b, -bpos), x, s) == \
((-1)**(a + 1)*2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(a + s)
*gamma(1 - a - 2*s)/gamma(1 - s),
(-re(a), -re(a)/2 + S(1)/2), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, -bpos), x, s) == \
        (2**(a + 2*s)*a*bpos**(a + 2*s)*gamma(-a - 2*s)*gamma(a + s)
         / gamma(-s + 1),
         (-re(a), -re(a)/2), True)
# Test exponent 1:
assert MT(expr.subs({b: -bpos, a: 1}), x, s) == \
(-bpos**(2*s + 1)*gamma(s)*gamma(-s - S(1)/2)/(2*sqrt(pi)),
(-1, -S(1)/2), True)
def test_mellin_transform():
from sympy import Max, Min, Ne
MT = mellin_transform
bpos = symbols('b', positive=True)
# 8.4.2
assert MT(x**nu*Heaviside(x - 1), x, s) == \
(1/(-nu - s), (-oo, -re(nu)), True)
assert MT(x**nu*Heaviside(1 - x), x, s) == \
(1/(nu + s), (-re(nu), oo), True)
assert MT((1 - x)**(beta - 1)*Heaviside(1 - x), x, s) == \
(gamma(beta)*gamma(s)/gamma(beta + s), (0, oo), re(-beta) < 0)
assert MT((x - 1)**(beta - 1)*Heaviside(x - 1), x, s) == \
(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
(-oo, -re(beta) + 1), re(-beta) < 0)
assert MT((1 + x)**(-rho), x, s) == \
(gamma(s)*gamma(rho - s)/gamma(rho), (0, re(rho)), True)
# TODO also the conditions should be simplified
assert MT(abs(1 - x)**(-rho), x, s) == (
cos(pi*(rho/2 - s))*gamma(s)*gamma(rho - s)/(cos(pi*rho/2)*gamma(rho)),
(0, re(rho)), And(re(rho) - 1 < 0, re(rho) < 1))
mt = MT((1 - x)**(beta - 1)*Heaviside(1 - x)
+ a*(x - 1)**(beta - 1)*Heaviside(x - 1), x, s)
    assert (mt[1], mt[2]) == ((0, -re(beta) + 1), True)
assert MT((x**a - b**a)/(x - b), x, s)[0] == \
pi*b**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s)))
assert MT((x**a - bpos**a)/(x - bpos), x, s) == \
(pi*bpos**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s))),
(Max(-re(a), 0), Min(1 - re(a), 1)), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, bpos), x, s) == \
(-a*(2*bpos)**(a + 2*s)*gamma(s)*gamma(-a - 2*s)/gamma(-a - s + 1),
(0, -re(a)/2), True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
assert MT(expr.subs(b, bpos), x, s) == \
(2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(s)
*gamma(1 - a - 2*s)/gamma(1 - a - s),
(0, -re(a)/2 + S(1)/2), True)
# 8.4.2
assert MT(exp(-x), x, s) == (gamma(s), (0, oo), True)
assert MT(exp(-1/x), x, s) == (gamma(-s), (-oo, 0), True)
# 8.4.5
assert MT(log(x)**4*Heaviside(1 - x), x, s) == (24/s**5, (0, oo), True)
assert MT(log(x)**3*Heaviside(x - 1), x, s) == (6/s**4, (-oo, 0), True)
assert MT(log(x + 1), x, s) == (pi/(s*sin(pi*s)), (-1, 0), True)
assert MT(log(1/x + 1), x, s) == (pi/(s*sin(pi*s)), (0, 1), True)
assert MT(log(abs(1 - x)), x, s) == (pi/(s*tan(pi*s)), (-1, 0), True)
assert MT(log(abs(1 - 1/x)), x, s) == (pi/(s*tan(pi*s)), (0, 1), True)
# TODO we cannot currently do these (needs summation of 3F2(-1))
# this also implies that they cannot be written as a single g-function
# (although this is possible)
mt = MT(log(x)/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)**2/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)/(x + 1)**2, x, s)
assert mt[1:] == ((0, 2), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
# 8.4.14
assert MT(erf(sqrt(x)), x, s) == \
(-gamma(s + S(1)/2)/(sqrt(pi)*s), (-S(1)/2, 0), True)
def test_mellin_transform_bessel():
from sympy import Max, Min, hyper, meijerg
MT = mellin_transform
# 8.4.19
assert MT(besselj(a, 2*sqrt(x)), x, s) == \
(gamma(a/2 + s)/gamma(a/2 - s + 1), (-re(a)/2, S(3)/4), True)
assert MT(sin(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(S(1)/2 - 2*s)*gamma((a + 1)/2 + s)
/ (gamma(1 - s - a/2)*gamma(1 + a - 2*s)),
(-(re(a) + 1)/2, S(1)/4), True)
assert MT(cos(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(a/2 + s)*gamma(-2*s + S(1)/2)/(gamma(-a/2 - s + S(1)/2)*
gamma(a - 2*s + 1)), (-re(a)/2, S(1)/4), True)
assert MT(besselj(a, sqrt(x))**2, x, s) == \
(gamma(a + s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
(-re(a), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*besselj(-a, sqrt(x)), x, s) == \
(gamma(s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - a - s)*gamma(1 + a - s)),
(0, S(1)/2), True)
# NOTE: prudnikov gives the strip below as (1/2 - re(a), 1). As far as
# I can see this is wrong (since besselj(z) ~ 1/sqrt(z) for z large)
assert MT(besselj(a - 1, sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(gamma(1 - s)*gamma(a + s - S(1)/2)
/ (sqrt(pi)*gamma(S(3)/2 - s)*gamma(a - s + S(1)/2)),
(S(1)/2 - re(a), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*besselj(b, sqrt(x)), x, s) == \
(4**s*gamma(1 - 2*s)*gamma((a + b)/2 + s)
/ (gamma(1 - s + (b - a)/2)*gamma(1 - s + (a - b)/2)
*gamma( 1 - s + (a + b)/2)),
(-(re(a) + re(b))/2, S(1)/2), True)
assert MT(besselj(a, sqrt(x))**2 + besselj(-a, sqrt(x))**2, x, s)[1:] == \
((Max(re(a), -re(a)), S(1)/2), True)
# Section 8.4.20
assert MT(bessely(a, 2*sqrt(x)), x, s) == \
(-cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)/pi,
(Max(-re(a)/2, re(a)/2), S(3)/4), True)
assert MT(sin(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*sin(pi*(a/2 - s))*gamma(S(1)/2 - 2*s)
* gamma((1 - a)/2 + s)*gamma((1 + a)/2 + s)
/ (sqrt(pi)*gamma(1 - s - a/2)*gamma(1 - s + a/2)),
(Max(-(re(a) + 1)/2, (re(a) - 1)/2), S(1)/4), True)
assert MT(cos(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)*gamma(S(1)/2 - 2*s)
/ (sqrt(pi)*gamma(S(1)/2 - s - a/2)*gamma(S(1)/2 - s + a/2)),
(Max(-re(a)/2, re(a)/2), S(1)/4), True)
assert MT(besselj(a, sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-cos(pi*s)*gamma(s)*gamma(a + s)*gamma(S(1)/2 - s)
/ (pi**S('3/2')*gamma(1 + a - s)),
(Max(-re(a), 0), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*bessely(b, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - b/2 + s))*gamma(1 - 2*s)
* gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s)
/ (pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
(Max((-re(a) + re(b))/2, (-re(a) - re(b))/2), S(1)/2), True)
# NOTE bessely(a, sqrt(x))**2 and bessely(a, sqrt(x))*bessely(b, sqrt(x))
# are a mess (no matter what way you look at it ...)
assert MT(bessely(a, sqrt(x))**2, x, s)[1:] == \
((Max(-re(a), 0, re(a)), S(1)/2), True)
# Section 8.4.22
# TODO we can't do any of these (delicate cancellation)
# Section 8.4.23
assert MT(besselk(a, 2*sqrt(x)), x, s) == \
        (gamma(s - a/2)*gamma(s + a/2)/2,
         (Max(-re(a)/2, re(a)/2), oo), True)
assert MT(besselj(a, 2*sqrt(2*sqrt(x)))*besselk(a, 2*sqrt(2*sqrt(x))), x, s) == \
(4**(-s)*gamma(2*s)*gamma(a/2 + s)/gamma(a/2 - s + 1)/2,
(Max(-re(a)/2, 0), oo), True)
# TODO bessely(a, x)*besselk(a, x) is a mess
assert MT(besseli(a, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
        (gamma(s)*gamma(a + s)*gamma(-s + S(1)/2)
         / (2*sqrt(pi)*gamma(a - s + 1)),
         (Max(-re(a), 0), S(1)/2), True)
assert MT(besseli(b, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(4**s*gamma(-2*s + 1)*gamma(-a/2 + b/2 + s)*gamma(a/2 + b/2 + s)/
(2*gamma(-a/2 + b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
(Max(-re(a)/2 - re(b)/2, re(a)/2 - re(b)/2), S(1)/2), True)
# TODO products of besselk are a mess
# TODO this can be simplified considerably (although I have no idea how)
mt = MT(exp(-x/2)*besselk(a, x/2), x, s)
assert not mt[0].has(meijerg, hyper)
assert mt[1:] == ((Max(-re(a), re(a)), oo), True)
# TODO exp(x/2)*besselk(a, x/2) [etc] cannot currently be done
# TODO various strange products of special orders
def test_expint():
from sympy import E1, expint, Max, re, lerchphi, Symbol, simplify, Si, Ci, Ei
aneg = Symbol('a', negative=True)
u = Symbol('u', polar=True)
assert mellin_transform(E1(x), x, s) == (gamma(s)/s, (0, oo), True)
assert inverse_mellin_transform(gamma(s)/s, s, x,
(0, oo)).rewrite(expint).expand() == E1(x)
assert mellin_transform(expint(a, x), x, s) == \
(gamma(s)/(a + s - 1), (Max(1 - re(a), 0), oo), True)
# XXX IMT has hickups with complicated strips ...
assert simplify(unpolarify(
inverse_mellin_transform(gamma(s)/(aneg + s - 1), s, x,
(1 - aneg, oo)).rewrite(expint).expand(func=True))) == \
expint(aneg, x)
assert mellin_transform(Si(x), x, s) == \
(-2**s*sqrt(pi)*gamma((s + 1)/2)/(2*s*gamma(-s/2 + 1)
), (-1, 0), True)
assert inverse_mellin_transform(-2**s*sqrt(pi)*gamma((s + 1)/2)
/(2*s*gamma(-s/2 + 1)), s, x, (-1, 0)) \
== Si(x)
assert mellin_transform(Ci(sqrt(x)), x, s) == \
(-4**s*sqrt(pi)*gamma(s)/(2*s*gamma(-s + S(1)/2)), (0, 1), True)
assert inverse_mellin_transform(
-4**s*sqrt(pi)*gamma(s)/(2*s*gamma(-s + S(1)/2)),
s, u, (0, 1)).expand() == Ci(sqrt(u))
# TODO LT of Si, Shi, Chi is a mess ...
assert laplace_transform(Ci(x), x, s) == (-log(1 + s**2)/2/s, 0, True)
assert laplace_transform(expint(a, x), x, s) == \
(lerchphi(s*polar_lift(-1), 1, a), 0, S(0) < re(a))
assert laplace_transform(expint(1, x), x, s) == (log(s + 1)/s, 0, True)
assert laplace_transform(expint(2, x), x, s) == \
((s - log(s + 1))/s**2, 0, True)
assert inverse_laplace_transform(-log(1 + s**2)/2/s, s, u).expand() == \
Heaviside(u)*Ci(u)
assert inverse_laplace_transform(log(s + 1)/s, s, x).rewrite(expint) == \
Heaviside(x)*E1(x)
assert inverse_laplace_transform((s - log(s + 1))/s**2, s,
x).rewrite(expint).expand() == \
(expint(2, x)*Heaviside(x)).rewrite(Ei).rewrite(expint).expand()
def test_inverse_mellin_transform():
    from sympy import (sin, simplify, expand_func, powsimp, Max, Min, expand,
                       powdenest, exp_polar, combsimp, cos, cot)
IMT = inverse_mellin_transform
assert IMT(gamma(s), s, x, (0, oo)) == exp(-x)
assert IMT(gamma(-s), s, x, (-oo, 0)) == exp(-1/x)
assert simplify(IMT(s/(2*s**2 - 2), s, x, (2, oo))) == \
(x**2 + 1)*Heaviside(1 - x)/(4*x)
# test passing "None"
assert IMT(1/(s**2 - 1), s, x, (-1, None)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
assert IMT(1/(s**2 - 1), s, x, (None, 1)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
# test expansion of sums
assert IMT(gamma(s) + gamma(s - 1), s, x, (1, oo)) == (x + 1)*exp(-x)/x
# test factorisation of polys
r = symbols('r', real=True)
assert IMT(1/(s**2 + 1), s, exp(-x), (None, oo)
).subs(x, r).rewrite(sin).simplify() \
== sin(r)*Heaviside(1 - exp(-r))
# test multiplicative substitution
a, b = symbols('a b', positive=True)
c, d = symbols('c d')
assert IMT(b**(-s/a)*factorial(s/a)/s, s, x, (0, oo)) == exp(-b*x**a)
assert IMT(factorial(a/b + s/b)/(a + s), s, x, (-a, oo)) == x**a*exp(-x**b)
from sympy import expand_mul
def simp_pows(expr):
return simplify(powsimp(expand_mul(expr, deep=False), force=True)).replace(exp_polar, exp)
# Now test the inverses of all direct transforms tested above
# Section 8.4.2
assert IMT(-1/(nu + s), s, x, (-oo, None)) == x**nu*Heaviside(x - 1)
assert IMT(1/(nu + s), s, x, (None, oo)) == x**nu*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(s)/gamma(s + beta), s, x, (0, oo))) \
== (1 - x)**(beta - 1)*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
s, x, (-oo, None))) \
== (x - 1)**(beta - 1)*Heaviside(x - 1)
assert simp_pows(IMT(gamma(s)*gamma(rho - s)/gamma(rho), s, x, (0, None))) \
== (1/(x + 1))**rho
assert simp_pows(IMT(d**c*d**(s - 1)*sin(pi*c)
*gamma(s)*gamma(s + c)*gamma(1 - s)*gamma(1 - s - c)/pi,
s, x, (Max(-re(c), 0), Min(1 - re(c), 1)))) \
== (x**c - d**c)/(x - d)
assert simplify(IMT(1/sqrt(pi)*(-c/2)*gamma(s)*gamma((1 - c)/2 - s)
*gamma(-c/2 - s)/gamma(1 - c - s),
s, x, (0, -re(c)/2))) == \
(1 + sqrt(x + 1))**c
assert simplify(IMT(2**(a + 2*s)*b**(a + 2*s - 1)*gamma(s)*gamma(1 - a - 2*s)
/gamma(1 - a - s), s, x, (0, (-re(a) + 1)/2))) == \
(b + sqrt(
b**2 + x))**(a - 1)*(b**2 + b*sqrt(b**2 + x) + x)/(b**2 + x)
assert simplify(IMT(-2**(c + 2*s)*c*b**(c + 2*s)*gamma(s)*gamma(-c - 2*s)
/ gamma(-c - s + 1), s, x, (0, -re(c)/2))) == \
(b + sqrt(b**2 + x))**c
# Section 8.4.5
assert IMT(24/s**5, s, x, (0, oo)) == log(x)**4*Heaviside(1 - x)
assert expand(IMT(6/s**4, s, x, (-oo, 0)), force=True) == \
log(x)**3*Heaviside(x - 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (-1, 0)) == log(x + 1)
assert IMT(pi/(s*sin(pi*s/2)), s, x, (-2, 0)) == log(x**2 + 1)
assert IMT(pi/(s*sin(2*pi*s)), s, x, (-S(1)/2, 0)) == log(sqrt(x) + 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (0, 1)) == log(1 + 1/x)
# TODO
def mysimp(expr):
from sympy import expand, logcombine, powsimp
return expand(
powsimp(logcombine(expr, force=True), force=True, deep=True),
force=True).replace(exp_polar, exp)
assert mysimp(mysimp(IMT(pi/(s*tan(pi*s)), s, x, (-1, 0)))) in [
log(1 - x)*Heaviside(1 - x) + log(x - 1)*Heaviside(x - 1),
log(x)*Heaviside(x - 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1)]
# test passing cot
assert mysimp(IMT(pi*cot(pi*s)/s, s, x, (0, 1))) in [
log(1/x - 1)*Heaviside(1 - x) + log(1 - 1/x)*Heaviside(x - 1),
-log(x)*Heaviside(-x + 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1), ]
# 8.4.14
assert IMT(-gamma(s + S(1)/2)/(sqrt(pi)*s), s, x, (-S(1)/2, 0)) == \
erf(sqrt(x))
# 8.4.19
assert simplify(IMT(gamma(a/2 + s)/gamma(a/2 - s + 1), s, x, (-re(a)/2, S(3)/4))) \
== besselj(a, 2*sqrt(x))
assert simplify(IMT(2**a*gamma(S(1)/2 - 2*s)*gamma(s + (a + 1)/2)
/ (gamma(1 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-(re(a) + 1)/2, S(1)/4))) == \
sin(sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(2**a*gamma(a/2 + s)*gamma(S(1)/2 - 2*s)
/ (gamma(S(1)/2 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-re(a)/2, S(1)/4))) == \
cos(sqrt(x))*besselj(a, sqrt(x))
# TODO this comes out as an amazing mess, but simplifies nicely
assert simplify(IMT(gamma(a + s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
s, x, (-re(a), S(1)/2))) == \
besselj(a, sqrt(x))**2
assert simplify(IMT(gamma(s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s - a)*gamma(1 + a - s)),
s, x, (0, S(1)/2))) == \
besselj(-a, sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(4**s*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
/ (gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
*gamma(a/2 + b/2 - s + 1)),
s, x, (-(re(a) + re(b))/2, S(1)/2))) == \
besselj(a, sqrt(x))*besselj(b, sqrt(x))
# Section 8.4.20
# TODO this can be further simplified!
assert simplify(IMT(-2**(2*s)*cos(pi*a/2 - pi*b/2 + pi*s)*gamma(-2*s + 1) *
gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s) /
(pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
s, x,
(Max(-re(a)/2 - re(b)/2, -re(a)/2 + re(b)/2), S(1)/2))) == \
(-cos(pi*b)*besselj(b, sqrt(x)) + besselj(-b, sqrt(x))) * \
besselj(a, sqrt(x))/sin(pi*b)*(-1)
# TODO more
# for coverage
assert IMT(pi/cos(pi*s), s, x, (0, S(1)/2)) == sqrt(x)/(x + 1)
def test_laplace_transform():
from sympy import (fresnels, fresnelc, hyper)
LT = laplace_transform
a, b, c, = symbols('a b c', positive=True)
t = symbols('t')
w = Symbol("w")
f = Function("f")
# Test unevaluated form
assert laplace_transform(f(t), t, w) == LaplaceTransform(f(t), t, w)
assert inverse_laplace_transform(
f(w), w, t, plane=0) == InverseLaplaceTransform(f(w), w, t, 0)
# test a bug
spos = symbols('s', positive=True)
assert LT(exp(t), t, spos)[:2] == (1/(spos - 1), True)
# basic tests from wikipedia
assert LT((t - a)**b*exp(-c*(t - a))*Heaviside(t - a), t, s) == \
((s + c)**(-b - 1)*exp(-a*s)*gamma(b + 1), -c, True)
assert LT(t**a, t, s) == (s**(-a - 1)*gamma(a + 1), 0, True)
assert LT(Heaviside(t), t, s) == (1/s, 0, True)
assert LT(Heaviside(t - a), t, s) == (exp(-a*s)/s, 0, True)
assert LT(1 - exp(-a*t), t, s) == (a/(s*(a + s)), 0, True)
assert LT((exp(2*t) - 1)*exp(-b - t)*Heaviside(t)/2, t, s, noconds=True) \
== exp(-b)/(s**2 - 1)
assert LT(exp(t), t, s)[:2] == (1/(s - 1), 1)
assert LT(exp(2*t), t, s)[:2] == (1/(s - 2), 2)
assert LT(exp(a*t), t, s)[:2] == (1/(s - a), a)
assert LT(log(t/a), t, s) == ((log(a) + log(s) + EulerGamma)/(-s), 0, True)
assert LT(erf(t), t, s) == ((-erf(s/2) + 1)*exp(s**2/4)/s, 0, True)
assert LT(sin(a*t), t, s) == (a/(a**2 + s**2), 0, True)
assert LT(cos(a*t), t, s) == (s/(a**2 + s**2), 0, True)
# TODO would be nice to have these come out better
assert LT(
exp(-a*t)*sin(b*t), t, s) == (1/b/(1 + (a + s)**2/b**2), -a, True)
assert LT(exp(-a*t)*cos(b*t), t, s) == \
(1/(s + a)/(1 + b**2/(a + s)**2), -a, True)
# TODO sinh, cosh have delicate cancellation
assert LT(besselj(0, t), t, s) == (1/sqrt(1 + s**2), 0, True)
assert LT(besselj(1, t), t, s) == (1 - 1/sqrt(1 + 1/s**2), 0, True)
# TODO general order works, but is a *mess*
# TODO besseli also works, but is an even greater mess
# test a bug in conditions processing
# TODO the auxiliary condition should be recognised/simplified
assert LT(exp(t)*cos(t), t, s)[:-1] in [
((s - 1)/(s**2 - 2*s + 2), -oo),
((s - 1)/((s - 1)**2 + 1), -oo),
]
# Fresnel functions
assert laplace_transform(fresnels(t), t, s) == \
((-sin(s**2/(2*pi))*fresnels(s/pi) + sin(s**2/(2*pi))/2 -
cos(s**2/(2*pi))*fresnelc(s/pi) + cos(s**2/(2*pi))/2)/s, 0, True)
assert laplace_transform(fresnelc(t), t, s) == \
(sqrt(2)*(sqrt(2)*sin(s**2/(2*pi))*fresnelc(s/pi) -
sqrt(2)*cos(s**2/(2*pi))*fresnels(s/pi) + cos(s**2/(2*pi) +
pi/4))/(2*s), 0, True)
def test_inverse_laplace_transform():
from sympy import (expand, sinh, cosh, besselj, besseli, exp_polar,
unpolarify, simplify)
ILT = inverse_laplace_transform
a, b, c, = symbols('a b c', positive=True)
t = symbols('t')
def simp_hyp(expr):
return expand(expand(expr).rewrite(sin))
# just test inverses of all of the above
assert ILT(1/s, s, t) == Heaviside(t)
assert ILT(1/s**2, s, t) == t*Heaviside(t)
assert ILT(1/s**5, s, t) == t**4*Heaviside(t)/24
assert ILT(exp(-a*s)/s, s, t) == Heaviside(t - a)
assert ILT(exp(-a*s)/(s + b), s, t) == exp(b*(a - t))*Heaviside(-a + t)
assert ILT(a/(s**2 + a**2), s, t) == sin(a*t)*Heaviside(t)
assert ILT(s/(s**2 + a**2), s, t) == cos(a*t)*Heaviside(t)
# TODO is there a way around simp_hyp?
assert simp_hyp(ILT(a/(s**2 - a**2), s, t)) == sinh(a*t)*Heaviside(t)
assert simp_hyp(ILT(s/(s**2 - a**2), s, t)) == cosh(a*t)*Heaviside(t)
assert ILT(a/((s + b)**2 + a**2), s, t) == exp(-b*t)*sin(a*t)*Heaviside(t)
assert ILT(
(s + b)/((s + b)**2 + a**2), s, t) == exp(-b*t)*cos(a*t)*Heaviside(t)
# TODO sinh/cosh shifted come out a mess. also delayed trig is a mess
# TODO should this simplify further?
assert ILT(exp(-a*s)/s**b, s, t) == \
(t - a)**(b - 1)*Heaviside(t - a)/gamma(b)
assert ILT(exp(-a*s)/sqrt(1 + s**2), s, t) == \
Heaviside(t - a)*besselj(0, a - t) # note: besselj(0, x) is even
# XXX ILT turns these branch factor into trig functions ...
assert simplify(ILT(a**b*(s + sqrt(s**2 - a**2))**(-b)/sqrt(s**2 - a**2),
s, t).rewrite(exp)) == \
Heaviside(t)*besseli(b, a*t)
assert ILT(a**b*(s + sqrt(s**2 + a**2))**(-b)/sqrt(s**2 + a**2),
s, t).rewrite(exp) == \
Heaviside(t)*besselj(b, a*t)
assert ILT(1/(s*sqrt(s + 1)), s, t) == Heaviside(t)*erf(sqrt(t))
# TODO can we make erf(t) work?
def test_fourier_transform():
from sympy import simplify, expand, expand_complex, factor, expand_trig
FT = fourier_transform
IFT = inverse_fourier_transform
def simp(x):
return simplify(expand_trig(expand_complex(expand(x))))
def sinc(x):
return sin(pi*x)/(pi*x)
k = symbols('k', real=True)
f = Function("f")
# TODO for this to work with real a, need to expand abs(a*x) to abs(a)*abs(x)
a = symbols('a', positive=True)
b = symbols('b', positive=True)
posk = symbols('posk', positive=True)
# Test unevaluated form
assert fourier_transform(f(x), x, k) == FourierTransform(f(x), x, k)
assert inverse_fourier_transform(
f(k), k, x) == InverseFourierTransform(f(k), k, x)
# basic examples from wikipedia
assert simp(FT(Heaviside(1 - abs(2*a*x)), x, k)) == sinc(k/a)/a
# TODO IFT is a *mess*
assert simp(FT(Heaviside(1 - abs(a*x))*(1 - abs(a*x)), x, k)) == sinc(k/a)**2/a
# TODO IFT
assert factor(FT(exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)
# NOTE: the ift comes out in pieces
assert IFT(1/(a + 2*pi*I*x), x, posk,
noconds=False) == (exp(-a*posk), True)
assert IFT(1/(a + 2*pi*I*x), x, -posk,
noconds=False) == (0, True)
assert IFT(1/(a + 2*pi*I*x), x, symbols('k', negative=True),
noconds=False) == (0, True)
# TODO IFT without factoring comes out as meijer g
assert factor(FT(x*exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)**2
assert FT(exp(-a*x)*sin(b*x)*Heaviside(x), x, k) == \
1/b/(1 + a**2*(1 + 2*pi*I*k/a)**2/b**2)
assert FT(exp(-a*x**2), x, k) == sqrt(pi)*exp(-pi**2*k**2/a)/sqrt(a)
assert IFT(sqrt(pi/a)*exp(-(pi*k)**2/a), k, x) == exp(-a*x**2)
assert FT(exp(-a*abs(x)), x, k) == 2*a/(a**2 + 4*pi**2*k**2)
# TODO IFT (comes out as meijer G)
# TODO besselj(n, x), n an integer > 0 actually can be done...
# TODO are there other common transforms (no distributions!)?
def test_sine_transform():
from sympy import sinh, cosh, EulerGamma
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert sine_transform(f(t), t, w) == SineTransform(f(t), t, w)
assert inverse_sine_transform(
f(w), w, t) == InverseSineTransform(f(w), w, t)
assert sine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_sine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert sine_transform(
(1/sqrt(t))**3, t, w) == sqrt(w)*gamma(S(1)/4)/(2*gamma(S(5)/4))
    assert sine_transform(t**(-a), t, w) == \
        2**(-a + S(1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma((a + 1)/2)
    assert inverse_sine_transform(
        2**(-a + S(1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma(a/2 + S(1)/2),
        w, t) == t**(-a)
assert sine_transform(
exp(-a*t), t, w) == sqrt(2)*w/(sqrt(pi)*(a**2 + w**2))
assert inverse_sine_transform(
sqrt(2)*w/(sqrt(pi)*(a**2 + w**2)), w, t) == -sinh(a*t) + cosh(a*t)
assert sine_transform(
log(t)/t, t, w) == sqrt(2)*sqrt(pi)*(-log(w**2) - 2*EulerGamma)/4
assert sine_transform(
t*exp(-a*t**2), t, w) == sqrt(2)*w*exp(-w**2/(4*a))/(4*a**(S(3)/2))
assert inverse_sine_transform(
sqrt(2)*w*exp(-w**2/(4*a))/(4*a**(S(3)/2)), w, t) == t*exp(-a*t**2)
def test_cosine_transform():
from sympy import sinh, cosh, Si, Ci
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert cosine_transform(f(t), t, w) == CosineTransform(f(t), t, w)
assert inverse_cosine_transform(
f(w), w, t) == InverseCosineTransform(f(w), w, t)
assert cosine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_cosine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert cosine_transform(1/(
a**2 + t**2), t, w) == -sqrt(2)*sqrt(pi)*(sinh(a*w) - cosh(a*w))/(2*a)
    assert cosine_transform(t**(-a), t, w) == \
        2**(-a + S(1)/2)*w**(a - 1)*gamma((-a + 1)/2)/gamma(a/2)
    assert inverse_cosine_transform(
        2**(-a + S(1)/2)*w**(a - 1)*gamma(-a/2 + S(1)/2)/gamma(a/2),
        w, t) == t**(-a)
assert cosine_transform(
exp(-a*t), t, w) == sqrt(2)*a/(sqrt(pi)*(a**2 + w**2))
assert inverse_cosine_transform(
sqrt(2)*a/(sqrt(pi)*(a**2 + w**2)), w, t) == -sinh(a*t) + cosh(a*t)
assert cosine_transform(exp(-a*sqrt(t))*cos(a*sqrt(
t)), t, w) == -a*(sinh(a**2/(2*w)) - cosh(a**2/(2*w)))/(2*w**(S(3)/2))
assert cosine_transform(1/(a + t), t, w) == -sqrt(
2)*((2*Si(a*w) - pi)*sin(a*w) + 2*cos(a*w)*Ci(a*w))/(2*sqrt(pi))
assert inverse_cosine_transform(sqrt(2)*meijerg(((S(1)/2, 0), ()), (
(S(1)/2, 0, 0), (S(1)/2,)), a**2*w**2/4)/(2*pi), w, t) == 1/(a + t)
assert cosine_transform(1/sqrt(a**2 + t**2), t, w) == sqrt(2)*meijerg(
((S(1)/2,), ()), ((0, 0), (S(1)/2,)), a**2*w**2/4)/(2*sqrt(pi))
assert inverse_cosine_transform(sqrt(2)*meijerg(((S(1)/2,), ()), ((0, 0), (S(1)/2,)), a**2*w**2/4)/(2*sqrt(pi)), w, t) == 1/(t*sqrt(a**2/t**2 + 1))
def test_hankel_transform():
from sympy import sinh, cosh, gamma, sqrt, exp
r = Symbol("r")
k = Symbol("k")
nu = Symbol("nu")
m = Symbol("m")
a = symbols("a")
assert hankel_transform(1/r, r, k, 0) == 1/k
assert inverse_hankel_transform(1/k, k, r, 0) == 1/r
assert hankel_transform(
1/r**m, r, k, 0) == 2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2)
assert inverse_hankel_transform(
2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2), k, r, 0) == r**(-m)
assert hankel_transform(1/r**m, r, k, nu) == 2**(
-m + 1)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2)
assert inverse_hankel_transform(2**(-m + 1)*k**(
m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2), k, r, nu) == r**(-m)
assert hankel_transform(r**nu*exp(-a*r), r, k, nu) == \
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(
3)/2)*gamma(nu + S(3)/2)/sqrt(pi)
assert inverse_hankel_transform(2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(3)/2)*gamma(nu + S(3)/2)/sqrt(pi), k, r, nu) == \
r**nu*(-sinh(a*r) + cosh(a*r))
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2019 by Brendt Wohlberg <brendt@ieee.org>
# Cristina Garcia-Cardona <cgarciac@lanl.gov>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""FISTA algorithms for the CCMOD problem"""
from __future__ import division, absolute_import
import copy
import numpy as np
from sporco.fista import fista
from sporco.array import atleast_nd
from sporco.fft import (rfftn, irfftn, empty_aligned, rfftn_empty_aligned,
rfl2norm2)
from sporco.linalg import inner
from sporco.cnvrep import CDU_ConvRepIndexing, getPcn, bcrop
__author__ = """Cristina Garcia-Cardona <cgarciac@lanl.gov>"""
class ConvCnstrMOD(fista.FISTADFT):
r"""
Base class for FISTA algorithm for Convolutional Constrained MOD
problem :cite:`garcia-2018-convolutional1`.
|
.. inheritance-diagram:: ConvCnstrMOD
:parts: 2
|
Solve the optimisation problem
.. math::
\mathrm{argmin}_\mathbf{d} \;
(1/2) \sum_k \left\| \sum_m \mathbf{d}_m * \mathbf{x}_{k,m} -
\mathbf{s}_k \right\|_2^2 \quad \text{such that} \quad
\mathbf{d}_m \in C
via the FISTA problem
.. math::
\mathrm{argmin}_\mathbf{d} \;
(1/2) \sum_k \left\| \sum_m \mathbf{d}_m * \mathbf{x}_{k,m} -
\mathbf{s}_k \right\|_2^2 + \sum_m \iota_C(\mathbf{d}_m) \;\;,
where :math:`\iota_C(\cdot)` is the indicator function of feasible
set :math:`C` consisting of filters with unit norm and constrained
support. Multi-channel problems with input image channels
:math:`\mathbf{s}_{c,k}` are also supported, either as
.. math::
\mathrm{argmin}_\mathbf{d} \; (1/2) \sum_c \sum_k \left\| \sum_m
\mathbf{d}_m * \mathbf{x}_{c,k,m} - \mathbf{s}_{c,k} \right\|_2^2
\quad \text{such that} \quad \mathbf{d}_m \in C
with single-channel dictionary filters :math:`\mathbf{d}_m` and
multi-channel coefficient maps :math:`\mathbf{x}_{c,k,m}`, or
.. math::
\mathrm{argmin}_\mathbf{d} \; (1/2) \sum_c \sum_k \left\| \sum_m
\mathbf{d}_{c,m} * \mathbf{x}_{k,m} - \mathbf{s}_{c,k}
\right\|_2^2 \quad \text{such that} \quad \mathbf{d}_{c,m} \in C
with multi-channel dictionary filters :math:`\mathbf{d}_{c,m}` and
single-channel coefficient maps :math:`\mathbf{x}_{k,m}`. In this
latter case, normalisation of filters :math:`\mathbf{d}_{c,m}` is
performed jointly over index :math:`c` for each filter :math:`m`.
After termination of the :meth:`solve` method, attribute :attr:`itstat`
is a list of tuples representing statistics of each iteration. The
fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``DFid`` : Value of data fidelity term :math:`(1/2) \sum_k \|
\sum_m \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k \|_2^2`
``Cnstr`` : Constraint violation measure
``Rsdl`` : Residual
``L`` : Inverse of gradient step parameter
``Time`` : Cumulative run time
"""
class Options(fista.FISTADFT.Options):
r"""ConvCnstrMOD algorithm options
Options include all of those defined in
:class:`.fista.FISTADFT.Options`, together with
additional options:
``ZeroMean`` : Flag indicating whether the solution
dictionary :math:`\{\mathbf{d}_m\}` should have zero-mean
components.
"""
defaults = copy.deepcopy(fista.FISTADFT.Options.defaults)
defaults.update({'ZeroMean': False})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
ConvCnstrMOD algorithm options
"""
if opt is None:
opt = {}
fista.FISTADFT.Options.__init__(self, opt)
def __setitem__(self, key, value):
"""Set options."""
fista.FISTADFT.Options.__setitem__(self, key, value)
itstat_fields_objfn = ('DFid', 'Cnstr')
hdrtxt_objfn = ('DFid', 'Cnstr')
hdrval_objfun = {'DFid': 'DFid', 'Cnstr': 'Cnstr'}
def __init__(self, Z, S, dsz, opt=None, dimK=1, dimN=2):
"""
This class supports an arbitrary number of spatial dimensions,
`dimN`, with a default of 2. The input coefficient map array `Z`
(usually labelled X, but renamed here to avoid confusion with
the X and Y variables in the FISTA base class) is expected to
be in standard form as computed by the GenericConvBPDN class.
The input signal set `S` is either `dimN` dimensional (no
channels, only one signal), `dimN` +1 dimensional (either
multiple channels or multiple signals), or `dimN` +2 dimensional
(multiple channels and multiple signals). Parameter `dimK`, with
a default value of 1, indicates the number of multiple-signal
dimensions in `S`:
::
Default dimK = 1, i.e. assume input S is of form
S(N0, N1, C, K) or S(N0, N1, K)
If dimK = 0 then input S is of form
S(N0, N1, C, K) or S(N0, N1, C)
The internal data layout for S, D (X here), and X (Z here) is:
::
dim<0> - dim<Nds-1> : Spatial dimensions, product of N0,N1,... is N
dim<Nds> : C number of channels in S and D
dim<Nds+1> : K number of signals in S
dim<Nds+2> : M number of filters in D
sptl. chn sig flt
S(N0, N1, C, K, 1)
D(N0, N1, C, 1, M) (X here)
X(N0, N1, 1, K, M) (Z here)
The `dsz` parameter indicates the desired filter supports in the
output dictionary, since this cannot be inferred from the
input variables. The format is the same as the `dsz` parameter
of :func:`.cnvrep.bcrop`.
Parameters
----------
Z : array_like
Coefficient map array
S : array_like
Signal array
dsz : tuple
Filter support size(s)
opt : ccmod.Options object
Algorithm options
dimK : int, optional (default 1)
Number of dimensions for multiple signals in input S
dimN : int, optional (default 2)
Number of spatial dimensions
"""
# Set default options if none specified
if opt is None:
opt = ConvCnstrMOD.Options()
# Infer problem dimensions and set relevant attributes of self
self.cri = CDU_ConvRepIndexing(dsz, S, dimK=dimK, dimN=dimN)
# Call parent class __init__
xshape = self.cri.shpD
super(ConvCnstrMOD, self).__init__(xshape, S.dtype, opt)
# Set gradient step parameter
self.set_attr('L', opt['L'], dval=self.cri.K * 14.0, dtype=self.dtype)
# Reshape S to standard layout (Z, i.e. X in cbpdn, is assumed
# to be taken from cbpdn, and therefore already in standard
# form). If the dictionary has a single channel but the input
# (and therefore also the coefficient map array) has multiple
# channels, the channel index and multiple image index have
# the same behaviour in the dictionary update equation: the
# simplest way to handle this is to just reshape so that the
# channels also appear on the multiple image index.
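        # For example (illustrative shapes only): with Nv == (N0, N1), C == 3
        # channels and K == 2 signals, S is reshaped below to
        # (N0, N1, 1, 6, 1), i.e. the channels are folded onto the signal axis.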
if self.cri.Cd == 1 and self.cri.C > 1:
self.S = S.reshape(self.cri.Nv + (1,) +
(self.cri.C * self.cri.K,) + (1,))
else:
self.S = S.reshape(self.cri.shpS)
self.S = np.asarray(self.S, dtype=self.dtype)
# Compute signal S in DFT domain
self.Sf = rfftn(self.S, None, self.cri.axisN)
# Create constraint set projection function
self.Pcn = getPcn(dsz, self.cri.Nv, self.cri.dimN, self.cri.dimCd,
zm=opt['ZeroMean'])
# Create byte aligned arrays for FFT calls
self.Y = self.X
self.X = empty_aligned(self.Y.shape, dtype=self.dtype)
self.X[:] = self.Y
# Initialise auxiliary variable Vf: Create byte aligned arrays
# for FFT calls
self.Vf = rfftn_empty_aligned(self.X.shape, self.cri.axisN,
self.dtype)
self.Xf = rfftn(self.X, None, self.cri.axisN)
self.Yf = self.Xf
self.store_prev()
self.Yfprv = self.Yf.copy() + 1e5
# Initialization needed for back tracking (if selected)
self.postinitialization_backtracking_DFT()
if Z is not None:
self.setcoef(Z)
def setcoef(self, Z):
"""Set coefficient array."""
# If the dictionary has a single channel but the input (and
# therefore also the coefficient map array) has multiple
# channels, the channel index and multiple image index have
# the same behaviour in the dictionary update equation: the
# simplest way to handle this is to just reshape so that the
# channels also appear on the multiple image index.
if self.cri.Cd == 1 and self.cri.C > 1:
Z = Z.reshape(self.cri.Nv + (1,) + (self.cri.Cx * self.cri.K,) +
(self.cri.M,))
self.Z = np.asarray(Z, dtype=self.dtype)
self.Zf = rfftn(self.Z, self.cri.Nv, self.cri.axisN)
def getdict(self, crop=True):
"""Get final dictionary. If ``crop`` is ``True``, apply
:func:`.cnvrep.bcrop` to returned array.
"""
D = self.X
if crop:
D = bcrop(D, self.cri.dsz, self.cri.dimN)
return D
def eval_grad(self):
"""Compute gradient in Fourier domain."""
# Compute X D - S
Ryf = self.eval_Rf(self.Yf)
gradf = inner(np.conj(self.Zf), Ryf, axis=self.cri.axisK)
# Multiple channel signal, single channel dictionary
if self.cri.C > 1 and self.cri.Cd == 1:
gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)
return gradf
def eval_Rf(self, Vf):
"""Evaluate smooth term in Vf."""
return inner(self.Zf, Vf, axis=self.cri.axisM) - self.Sf
def eval_proxop(self, V):
"""Compute proximal operator of :math:`g`."""
return self.Pcn(V)
def rsdl(self):
"""Compute fixed point residual in Fourier domain."""
diff = self.Xf - self.Yfprv
return rfl2norm2(diff, self.X.shape, axis=self.cri.axisN)
def eval_objfn(self):
"""Compute components of objective function as well as total
contribution to objective function.
"""
dfd = self.obfn_dfd()
cns = self.obfn_cns()
return (dfd, cns)
def obfn_dfd(self):
r"""Compute data fidelity term :math:`(1/2) \| \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \|_2^2`.
"""
Ef = self.eval_Rf(self.Xf)
return rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0
def obfn_cns(self):
r"""Compute constraint violation measure :math:`\|
P(\mathbf{y}) - \mathbf{y}\|_2`.
"""
return np.linalg.norm((self.Pcn(self.X) - self.X))
def obfn_f(self, Xf=None):
r"""Compute data fidelity term :math:`(1/2) \| \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \|_2^2`.
This is used for backtracking. Since the backtracking is
computed in the DFT, it is important to preserve the
DFT scaling.
"""
if Xf is None:
Xf = self.Xf
Rf = self.eval_Rf(Xf)
return 0.5 * np.linalg.norm(Rf.flatten(), 2)**2
def reconstruct(self, D=None):
"""Reconstruct representation."""
if D is None:
Df = self.Xf
else:
Df = rfftn(D, None, self.cri.axisN)
Sf = np.sum(self.Zf * Df, axis=self.cri.axisM)
return irfftn(Sf, self.cri.Nv, self.cri.axisN)
class ConvCnstrMODMask(ConvCnstrMOD):
r"""
FISTA algorithm for Convolutional Constrained MOD problem
with a spatial mask :cite:`garcia-2018-convolutional1`.
|
.. inheritance-diagram:: ConvCnstrMODMask
:parts: 2
|
Solve the optimisation problem
.. math::
\mathrm{argmin}_\mathbf{d} \;
(1/2) \left\| W \left(\sum_m \mathbf{d}_m * \mathbf{x}_m -
\mathbf{s}\right) \right\|_2^2 \quad \text{such that} \quad
\mathbf{d}_m \in C \;\; \forall m
where :math:`C` is the feasible set consisting of filters with unit
norm and constrained support, and :math:`W` is a mask array, via the
FISTA problem
.. math::
\mathrm{argmin}_{\mathbf{d}} \; (1/2) \left\| W \left(X
\mathbf{d} - \mathbf{s}\right) \right\|_2^2 +
\iota_C(\mathbf{d}_m) \;\;,
where :math:`\iota_C(\cdot)` is the indicator function of feasible
set :math:`C`, and :math:`X \mathbf{d} = \sum_m \mathbf{x}_m *
\mathbf{d}_m`.
See :class:`ConvCnstrMOD` for interface details.
"""
class Options(ConvCnstrMOD.Options):
"""ConvCnstrMODMask algorithm options
Options include all of those defined in
:class:`.fista.FISTA.Options`.
"""
defaults = copy.deepcopy(ConvCnstrMOD.Options.defaults)
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
ConvCnstrMODMask algorithm options
"""
if opt is None:
opt = {}
ConvCnstrMOD.Options.__init__(self, opt)
def __init__(self, Z, S, W, dsz, opt=None, dimK=None, dimN=2):
"""
Parameters
----------
Z : array_like
Coefficient map array
S : array_like
Signal array
W : array_like
Mask array. The array shape must be such that the array is
compatible for multiplication with the *internal* shape of
input array S (see :class:`.cnvrep.CDU_ConvRepIndexing` for a
discussion of the distinction between *external* and
*internal* data layouts).
dsz : tuple
Filter support size(s)
opt : :class:`ConvCnstrMODMask.Options` object
Algorithm options
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial dimensions
"""
# Set default options if none specified
if opt is None:
opt = ConvCnstrMODMask.Options()
# Infer problem dimensions and set relevant attributes of self
self.cri = CDU_ConvRepIndexing(dsz, S, dimK=dimK, dimN=dimN)
# Append singleton dimensions to W if necessary
if hasattr(W, 'ndim'):
W = atleast_nd(self.cri.dimN + 3, W)
# Reshape W if necessary (see discussion of reshape of S in
# ccmod base class)
if self.cri.Cd == 1 and self.cri.C > 1 and hasattr(W, 'ndim'):
# In most cases broadcasting rules make it possible for W
# to have a singleton dimension corresponding to a
# non-singleton dimension in S. However, when S is
# reshaped to interleave axisC and axisK on the same axis,
# broadcasting is no longer sufficient unless axisC and
# axisK of W are either both singleton or both of the same
# size as the corresponding axes of S. If neither of these
# cases holds, it is necessary to replicate the axis of W
# (axisC or axisK) that does not have the same size as the
# corresponding axis of S.
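            # For example (illustrative shapes only): with C == 3 and K == 2,
            # a W of internal shape (N0, N1, 3, 1, 1) is broadcast below to
            # (N0, N1, 3, 2, 1) and then reshaped to (N0, N1, 1, 6, 1).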
shpw = list(W.shape)
swck = shpw[self.cri.axisC] * shpw[self.cri.axisK]
if swck > 1 and swck < self.cri.C * self.cri.K:
if W.shape[self.cri.axisK] == 1 and self.cri.K > 1:
shpw[self.cri.axisK] = self.cri.K
else:
shpw[self.cri.axisC] = self.cri.C
W = np.broadcast_to(W, shpw)
self.W = W.reshape(
W.shape[0:self.cri.dimN] +
(1, W.shape[self.cri.axisC] * W.shape[self.cri.axisK], 1))
else:
self.W = W
super(ConvCnstrMODMask, self).__init__(Z, S, dsz, opt, dimK, dimN)
# Create byte aligned arrays for FFT calls
self.WRy = empty_aligned(self.S.shape, dtype=self.dtype)
self.Ryf = rfftn_empty_aligned(self.S.shape, self.cri.axisN,
self.dtype)
def eval_grad(self):
"""Compute gradient in Fourier domain."""
# Compute X D - S
self.Ryf[:] = self.eval_Rf(self.Yf)
# Map to spatial domain to multiply by mask
Ry = irfftn(self.Ryf, self.cri.Nv, self.cri.axisN)
# Multiply by mask
self.WRy[:] = (self.W**2) * Ry
# Map back to frequency domain
WRyf = rfftn(self.WRy, self.cri.Nv, self.cri.axisN)
gradf = inner(np.conj(self.Zf), WRyf, axis=self.cri.axisK)
# Multiple channel signal, single channel dictionary
if self.cri.C > 1 and self.cri.Cd == 1:
gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)
return gradf
def obfn_dfd(self):
r"""Compute data fidelity term :math:`(1/2) \sum_k \| W (\sum_m
\mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2`
"""
Ef = self.eval_Rf(self.Xf)
E = irfftn(Ef, self.cri.Nv, self.cri.axisN)
return (np.linalg.norm(self.W * E)**2) / 2.0
def obfn_f(self, Xf=None):
r"""Compute data fidelity term :math:`(1/2) \sum_k \| W (\sum_m
\mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2`.
This is used for backtracking. Since the backtracking is
computed in the DFT, it is important to preserve the
DFT scaling.
"""
if Xf is None:
Xf = self.Xf
Rf = self.eval_Rf(Xf)
R = irfftn(Rf, self.cri.Nv, self.cri.axisN)
WRf = rfftn(self.W * R, self.cri.Nv, self.cri.axisN)
return 0.5 * np.linalg.norm(WRf.flatten(), 2)**2
|
|
"""Plot timeseries data."""
import warnings
import numpy as np
from ..sel_utils import xarray_var_iter
from ..rcparams import rcParams
from .plot_utils import default_grid, get_plotting_function
def plot_ts(
idata,
y,
x=None,
y_hat=None,
y_holdout=None,
y_forecasts=None,
x_holdout=None,
plot_dim=None,
holdout_dim=None,
num_samples=100,
backend=None,
backend_kwargs=None,
y_kwargs=None,
y_hat_plot_kwargs=None,
y_mean_plot_kwargs=None,
vline_kwargs=None,
textsize=None,
figsize=None,
legend=True,
axes=None,
show=None,
):
"""Plot timeseries data.
Parameters
----------
idata : InferenceData
:class:`arviz.InferenceData` object.
y : str
Variable name from ``observed_data``.
Values to be plotted on y-axis before holdout.
    x : str, optional
        Values to be plotted on the x-axis before the holdout period.
        If None, the coords of the ``y`` dims are used.
y_hat : str, optional
Variable name from ``posterior_predictive``.
Assumed to be of shape ``(chain, draw, *y_dims)``.
y_holdout : str, optional
Variable name from ``observed_data``.
It represents the observed data after the holdout period.
Useful while testing the model, when you want to compare
observed test data with predictions/forecasts.
y_forecasts : str, optional
Variable name from ``posterior_predictive``.
It represents forecasts (posterior predictive) values after holdout period.
Useful to compare observed vs predictions/forecasts.
Assumed shape ``(chain, draw, *shape)``.
    x_holdout : str, optional
        Variable name from ``constant_data``.
        If None, the coords of ``y_holdout`` or of
        ``y_forecasts`` (whichever of the two is available) are used.
    plot_dim : str, optional
        Should be present in ``y.dims``.
        Necessary for selection of ``x`` if ``x`` is None and ``y`` is multidimensional.
    holdout_dim : str, optional
        Should be present in ``y_holdout.dims`` or ``y_forecasts.dims``.
        Necessary to choose ``x_holdout`` if ``x`` is None and
        ``y_holdout`` or ``y_forecasts`` is multidimensional.
num_samples : int, default 100
Number of posterior predictive samples drawn from ``y_hat`` and ``y_forecasts``.
backend : {"matplotlib", "bokeh"}, default "matplotlib"
Select plotting backend.
y_kwargs : dict, optional
Passed to :meth:`matplotlib.axes.Axes.plot` in matplotlib.
y_hat_plot_kwargs : dict, optional
Passed to :meth:`matplotlib.axes.Axes.plot` in matplotlib.
y_mean_plot_kwargs : dict, optional
Passed to :meth:`matplotlib.axes.Axes.plot` in matplotlib.
vline_kwargs : dict, optional
Passed to :meth:`matplotlib.axes.Axes.axvline` in matplotlib.
backend_kwargs : dict, optional
These are kwargs specific to the backend being used. Passed to
:func:`matplotlib.pyplot.subplots`.
figsize : tuple, optional
Figure size. If None, it will be defined automatically.
textsize : float, optional
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
Returns
-------
axes: matplotlib axes or bokeh figures.
See Also
--------
plot_lm : Posterior predictive and mean plots for regression-like data.
plot_ppc : Plot for posterior/prior predictive checks.
Examples
--------
Plot timeseries default plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> nchains, ndraws = (4, 500)
>>> obs_data = {
... "y": 2 * np.arange(1, 9) + 3,
... "z": 2 * np.arange(8, 12) + 3,
... }
>>> posterior_predictive = {
... "y": np.random.normal(
... (obs_data["y"] * 1.2) - 3, size=(nchains, ndraws, len(obs_data["y"]))
... ),
... "z": np.random.normal(
... (obs_data["z"] * 1.2) - 3, size=(nchains, ndraws, len(obs_data["z"]))
... ),
... }
>>> idata = az.from_dict(
... observed_data=obs_data,
... posterior_predictive=posterior_predictive,
... coords={"obs_dim": np.arange(1, 9), "pred_dim": np.arange(8, 12)},
... dims={"y": ["obs_dim"], "z": ["pred_dim"]},
... )
>>> ax = az.plot_ts(idata=idata, y="y", y_holdout="z")
Plot timeseries multidim plot
.. plot::
:context: close-figs
>>> ndim1, ndim2 = (5, 7)
>>> data = {
... "y": np.random.normal(size=(ndim1, ndim2)),
... "z": np.random.normal(size=(ndim1, ndim2)),
... }
>>> posterior_predictive = {
... "y": np.random.randn(nchains, ndraws, ndim1, ndim2),
... "z": np.random.randn(nchains, ndraws, ndim1, ndim2),
... }
>>> const_data = {"x": np.arange(1, 6), "x_pred": np.arange(5, 10)}
>>> idata = az.from_dict(
... observed_data=data,
... posterior_predictive=posterior_predictive,
... constant_data=const_data,
... dims={
... "y": ["dim1", "dim2"],
... "z": ["holdout_dim1", "holdout_dim2"],
... },
... coords={
... "dim1": range(ndim1),
... "dim2": range(ndim2),
... "holdout_dim1": range(ndim1 - 1, ndim1 + 4),
... "holdout_dim2": range(ndim2 - 1, ndim2 + 6),
... },
... )
>>> az.plot_ts(
... idata=idata,
... y="y",
... plot_dim="dim1",
... y_holdout="z",
... holdout_dim="holdout_dim1",
... )
"""
# Assign default values if none is provided
y_hat = y if y_hat is None and isinstance(y, str) else y_hat
y_forecasts = y_holdout if y_forecasts is None and isinstance(y_holdout, str) else y_forecasts
# holdout_dim = plot_dim if holdout_dim is None and plot_dim is not None else holdout_dim
if isinstance(y, str):
y = idata.observed_data[y]
if isinstance(y_holdout, str):
y_holdout = idata.observed_data[y_holdout]
if len(y.dims) > 1 and plot_dim is None:
raise ValueError("Argument plot_dim is needed in case of multidimensional data")
if y_holdout is not None and len(y_holdout.dims) > 1 and holdout_dim is None:
raise ValueError("Argument holdout_dim is needed in case of multidimensional data")
# Assigning values to x
x_var_names = None
if isinstance(x, str):
x = idata.constant_data[x]
elif isinstance(x, tuple):
x_var_names = x
x = idata.constant_data
elif x is None:
if plot_dim is None:
x = y.coords[y.dims[0]]
else:
x = y.coords[plot_dim]
# If posterior_predictive is present in idata and y_hat is there, get its values
if isinstance(y_hat, str):
if "posterior_predictive" not in idata.groups():
warnings.warn("posterior_predictive not found in idata", UserWarning)
y_hat = None
elif hasattr(idata.posterior_predictive, y_hat):
y_hat = idata.posterior_predictive[y_hat]
else:
warnings.warn("y_hat not found in posterior_predictive", UserWarning)
y_hat = None
# If posterior_predictive is present in idata and y_forecasts is there, get its values
x_holdout_var_names = None
if isinstance(y_forecasts, str):
if "posterior_predictive" not in idata.groups():
warnings.warn("posterior_predictive not found in idata", UserWarning)
y_forecasts = None
elif hasattr(idata.posterior_predictive, y_forecasts):
y_forecasts = idata.posterior_predictive[y_forecasts]
else:
warnings.warn("y_hat not found in posterior_predictive", UserWarning)
y_forecasts = None
# Assign values to y_holdout
if isinstance(y_holdout, str):
y_holdout = idata.observed_data[y_holdout]
# Assign values to x_holdout.
if y_holdout is not None or y_forecasts is not None:
if x_holdout is None:
if holdout_dim is None:
if y_holdout is None:
x_holdout = y_forecasts.coords[y_forecasts.dims[-1]]
else:
x_holdout = y_holdout.coords[y_holdout.dims[-1]]
else:
if y_holdout is None:
x_holdout = y_forecasts.coords[holdout_dim]
else:
x_holdout = y_holdout.coords[holdout_dim]
elif isinstance(x_holdout, str):
x_holdout = idata.constant_data[x_holdout]
elif isinstance(x_holdout, tuple):
x_holdout_var_names = x_holdout
x_holdout = idata.constant_data
# Choose dims to generate y plotters
if plot_dim is None:
skip_dims = list(y.dims)
elif isinstance(plot_dim, str):
skip_dims = [plot_dim]
elif isinstance(plot_dim, tuple):
skip_dims = list(plot_dim)
# Choose dims to generate y_holdout plotters
if holdout_dim is None:
if y_holdout is not None:
skip_holdout_dims = list(y_holdout.dims)
elif y_forecasts is not None:
skip_holdout_dims = list(y_forecasts.dims)
elif isinstance(holdout_dim, str):
skip_holdout_dims = [holdout_dim]
elif isinstance(holdout_dim, tuple):
skip_holdout_dims = list(holdout_dim)
# Compulsory plotters
y_plotters = list(
xarray_var_iter(
y,
skip_dims=set(skip_dims),
combined=True,
)
)
# Compulsory plotters
x_plotters = list(
xarray_var_iter(
x,
var_names=x_var_names,
skip_dims=set(x.dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
len_y = len(y_plotters)
len_x = len(x_plotters)
length_plotters = len_x * len_y
y_plotters = np.tile(y_plotters, (len_x, 1))
x_plotters = np.tile(x_plotters, (len_y, 1))
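    # For instance (illustrative), a 2-d ``y`` that splits into 3 series along
    # its non-plot dimension combined with 2 ``x`` variables gives
    # length_plotters == 6, and default_grid below chooses a rows x cols
    # layout for those 6 axes.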
# Generate plotters for all the available data
y_mean_plotters = None
y_hat_plotters = None
if y_hat is not None:
total_samples = y_hat.sizes["chain"] * y_hat.sizes["draw"]
pp_sample_ix = np.random.choice(total_samples, size=num_samples, replace=False)
        y_hat_stacked = y_hat.stack(__sample__=("chain", "draw"))[..., pp_sample_ix]
        y_hat_plotters = list(
            xarray_var_iter(
                y_hat_stacked,
skip_dims=set(skip_dims + ["__sample__"]),
combined=True,
)
)
y_mean = y_hat.mean(("chain", "draw"))
y_mean_plotters = list(
xarray_var_iter(
y_mean,
skip_dims=set(skip_dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
y_hat_plotters = np.tile(y_hat_plotters, (len_x, 1))
y_mean_plotters = np.tile(y_mean_plotters, (len_x, 1))
y_holdout_plotters = None
x_holdout_plotters = None
if y_holdout is not None:
y_holdout_plotters = list(
xarray_var_iter(
y_holdout,
skip_dims=set(skip_holdout_dims),
combined=True,
)
)
x_holdout_plotters = list(
xarray_var_iter(
x_holdout,
var_names=x_holdout_var_names,
skip_dims=set(x_holdout.dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
y_holdout_plotters = np.tile(y_holdout_plotters, (len_x, 1))
x_holdout_plotters = np.tile(x_holdout_plotters, (len_y, 1))
y_forecasts_plotters = None
y_forecasts_mean_plotters = None
if y_forecasts is not None:
total_samples = y_forecasts.sizes["chain"] * y_forecasts.sizes["draw"]
pp_sample_ix = np.random.choice(total_samples, size=num_samples, replace=False)
        y_forecasts_stacked = y_forecasts.stack(__sample__=("chain", "draw"))[..., pp_sample_ix]
        y_forecasts_plotters = list(
            xarray_var_iter(
                y_forecasts_stacked,
skip_dims=set(skip_holdout_dims + ["__sample__"]),
combined=True,
)
)
y_forecasts_mean = y_forecasts.mean(("chain", "draw"))
y_forecasts_mean_plotters = list(
xarray_var_iter(
y_forecasts_mean,
skip_dims=set(skip_holdout_dims),
combined=True,
)
)
x_holdout_plotters = list(
xarray_var_iter(
x_holdout,
var_names=x_holdout_var_names,
skip_dims=set(x_holdout.dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
y_forecasts_mean_plotters = np.tile(y_forecasts_mean_plotters, (len_x, 1))
y_forecasts_plotters = np.tile(y_forecasts_plotters, (len_x, 1))
x_holdout_plotters = np.tile(x_holdout_plotters, (len_y, 1))
rows, cols = default_grid(length_plotters)
tsplot_kwargs = dict(
x_plotters=x_plotters,
y_plotters=y_plotters,
y_mean_plotters=y_mean_plotters,
y_hat_plotters=y_hat_plotters,
y_holdout_plotters=y_holdout_plotters,
x_holdout_plotters=x_holdout_plotters,
y_forecasts_plotters=y_forecasts_plotters,
y_forecasts_mean_plotters=y_forecasts_mean_plotters,
num_samples=num_samples,
length_plotters=length_plotters,
rows=rows,
cols=cols,
backend_kwargs=backend_kwargs,
y_kwargs=y_kwargs,
y_hat_plot_kwargs=y_hat_plot_kwargs,
y_mean_plot_kwargs=y_mean_plot_kwargs,
vline_kwargs=vline_kwargs,
textsize=textsize,
figsize=figsize,
legend=legend,
axes=axes,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
plot = get_plotting_function("plot_ts", "tsplot", backend)
ax = plot(**tsplot_kwargs)
return ax
|
|
from lab.nodes import LabNode
class N9(LabNode):
def __init__(self, **kwargs):
super(N9, self).__init__(**kwargs)
self._ports = None
self._port_channels = None
self._vlans = None
self._neighbours_lldp = None
self._neighbours_cdp = None
self._vpc_domain = None
@property
def ports(self):
if not self._ports:
self.n9_get_status()
return self._ports
@property
def port_channels(self):
if not self._port_channels:
self.n9_get_status()
return self._port_channels
@property
def neighbours_lldp(self):
if not self._neighbours_lldp:
self.n9_get_status()
return self._neighbours_lldp
@property
def neighbours_cdp(self):
if not self._neighbours_cdp:
self.n9_get_status()
return self._neighbours_cdp
@property
def vlans(self):
if not self._vlans:
self.n9_get_status()
return self._vlans
@property
def vpc_domain(self):
if self._vlans is None:
self.n9_get_status()
return self._vpc_domain
def n9_allow_feature_nxapi(self):
from fabric.api import settings, run
with settings(host_string='{user}@{ip}'.format(user=self.oob_username, ip=self.oob_ip), password=self.oob_password):
            if 'disabled' in run('sh feature | i nxapi', shell=False):
run('conf t ; feature nxapi', shell=False)
def _rest_api(self, commands, timeout=5, method='cli'):
import requests
import json
body = [{"jsonrpc": "2.0", "method": method, "params": {"cmd": x, "version": 1}, "id": i} for i, x in enumerate(commands, start=1)]
url = 'http://{0}/ins'.format(self.oob_ip)
try:
data = json.dumps(body)
result = requests.post(url, auth=(self.oob_username, self.oob_password), headers={'content-type': 'application/json-rpc'}, data=data, timeout=timeout)
if result.ok:
return result.json()
else:
raise RuntimeError('{}: {} {} {}'.format(self, url, body, result.text))
except requests.exceptions.ConnectionError:
self.n9_allow_feature_nxapi()
            return self._rest_api(commands=commands, timeout=timeout, method=method)
except requests.exceptions.ReadTimeout:
raise RuntimeError('{}: timed out after {} secs'.format(self, timeout))
def n9_cmd(self, commands, timeout=5):
d = {}
if type(commands) is not list:
is_not_list = True
commands = [commands]
else:
is_not_list = False
self.log_debug('executing ' + ' '.join(commands))
        results = self._rest_api(commands=commands, timeout=timeout)
if is_not_list:
results = [results]
for c, r in zip(commands, results):
if 'result' not in r or r['result'] is None:
d[c] = []
else:
r = r['result']['body']
d[c] = r.values()[0].values()[0] if len(r) == 1 else r
return d
def get_actual_hostname(self):
res = self.cmd(['sh switchname'])
return res['result']['body']['hostname']
def cmd(self, commands, timeout=15, method='cli'):
if type(commands) is not list: # it might be provided as a string where commands are separated by ','
commands = commands.strip('[]')
commands = commands.split(',')
results = self._rest_api(commands=commands, timeout=int(timeout), method=method)
if len(commands) == 1:
results = [results]
for i, x in enumerate(results, start=0):
if 'error' in x:
raise NameError('{cmd} : {msg}'.format(msg=x['error']['data']['msg'].strip('%\n'), cmd=commands[i]))
return dict(results[0])
def find_neighbour_with_mac(self, mac, cimc_port_id):
from lab.nodes.n9.n9_neighbour import N9neighbourLLDP
return N9neighbourLLDP.find_with_mac(mac=mac, cimc_port_id=cimc_port_id, neighbours=self.neighbours_lldp)
def n9_change_port_state(self, port_no, port_state="no shut"):
"""
Change port state of the port
:param port_no: should be in full format like e1/3 or po1
:param port_state: 'shut' or 'no shut'
"""
self.cmd(['conf t', 'int {}'.format(port_no), port_state])
def n9_get_status(self):
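        # Refresh the cached view of the switch (ports, port-channels, vlans, LLDP/CDP
        # neighbours) from a single batched NX-API call; 'sh vpc' is skipped for the 'nc' node.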
from lab.nodes.n9.n9_neighbour import N9neighbourLLDP, N9neighbourCDP
from lab.nodes.n9.n9_port import N9Port
from lab.nodes.n9.n9_port_channel import N9PortChannel
from lab.nodes.n9.n9_vlan import N9Vlan
from lab.nodes.n9.n9_vlan_port import N9VlanPort
a = self.n9_cmd(['sh port-channel summary', 'sh int st', 'sh int br', 'sh vlan', 'sh cdp nei det', 'sh lldp nei det'] + (['sh vpc'] if self.id != 'nc' else []), timeout=30)
self._neighbours_lldp = N9neighbourLLDP.process_n9_answer(n9=self, answer=a['sh lldp nei det'])
self._neighbours_cdp = N9neighbourCDP.process_n9_answer(n9=self, answer=a['sh cdp nei det'])
self._vlans = N9Vlan.process_n9_answer(n9=self, answer=a['sh vlan'])
if 'sh vpc' in a:
peer_tbl = a['sh vpc'].get('TABLE_peerlink', {'ROW_peerlink': []})['ROW_peerlink']
vpc_tbl = a['sh vpc'].get('TABLE_vpc', {'ROW_vpc': []})['ROW_vpc']
vpc_lst = [vpc_tbl] if type(vpc_tbl) is dict else vpc_tbl # if there is only one vpc the API returns dict but not a list. Convert to list
sh_vpc_dics = {x['vpc-ifindex'].replace('Po', 'port-channel'): x for x in vpc_lst}
assert len(sh_vpc_dics) == int(a['sh vpc']['num-of-vpcs']) # this is a number of vpc excluding peer-link vpc
if peer_tbl:
sh_vpc_dics[peer_tbl['peerlink-ifindex'].replace('Po', 'port-channel')] = peer_tbl
else:
sh_vpc_dics = {}
sh_pc_sum_lst = [a['sh port-channel summary']] if type(a['sh port-channel summary']) is dict else a['sh port-channel summary'] # if there is only one port-channel the API returns dict but not a list. Convert to list
sh_pc_sum_dics = {x['port-channel']: x for x in sh_pc_sum_lst}
self._ports = {}
self._port_channels = {}
for st, br in zip(a['sh int st'], a['sh int br']):
port_id = st['interface']
if port_id.startswith('port-channel'):
self._port_channels[port_id] = N9PortChannel(n9=self, sh_int_st_dic=st, sh_int_br_dic=br, sh_pc_sum_dic=sh_pc_sum_dics[port_id], sh_vpc_dic=sh_vpc_dics.get(port_id))
elif port_id.startswith('Vlan'):
self._ports[port_id] = N9VlanPort(n9=self, sh_int_st_dic=st, sh_int_br_dic=br)
elif port_id.startswith('Ethernet'):
self._ports[port_id] = N9Port(n9=self, sh_int_st_dic=st, sh_int_br_dic=br)
else:
continue
def n9_validate(self):
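        # Compare the switch configuration against what the pod wiring requires: make sure
        # every network vlan exists, then check/create the port-channel and port settings
        # for each wire that terminates on this node.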
from lab.nodes.n9.n9_vlan import N9Vlan
from lab.nodes.n9.n9_port_channel import N9PortChannel
from lab.nodes.n9.n9_port import N9Port
special_pod_name = self.pod.name.replace('-', '').replace('vts', '').replace('vpp', '')[:3]
map(lambda v: self.vlans.get(str(v.vlan), N9Vlan.create(n9=self, vlan_id=v.vlan)).handle_vlan(vlan_name=special_pod_name + '-' + v.id), self.pod.networks.values())
checked = []
for wire in [x for x in self.pod.wires if self in [x.n1, x.n2]]:
own_port_id = wire.get_own_port(node=self)
if wire.is_n9_ucs(): # it's a potential connection to our node
if 'adaptor-MLOM' in wire.port_id1:
a = 'M' + wire.port_id1[-1]
port_mode = 'trunk'
elif 'adapter-L' in wire.port_id1:
a = 'L' + wire.port_id1[-1]
port_mode = 'trunk'
else:
a = ' TREX' + wire.port_id1[-1]
port_mode = 'trunk'
desc_port = special_pod_name + wire.n1.short + ' ' + a + ' ' + wire.mac + ' ' + wire.n1.oob_ip
desc_pc = desc_port
pc_id = wire.pc_id
vlans = ','.join(sorted([str(x.vlan) for x in wire.n1.networks_dic.values()])) + ',' + self.pod.setup_data_dic['TENANT_VLAN_RANGES'].replace(':', '-')
elif wire.is_n9_oob():
continue
elif wire.is_n9_n9(): # it's a potential peer link
pc_id = wire.pc_id
desc_port = 'peerlink ' + wire.port_id2.strip('Ethernet')
desc_pc = 'peerlink'
port_mode = 'trunk'
vlans = 'all'
elif wire.is_n9_tor():
pc_id = wire.pc_id
desc_port = 'uplink ' + wire.port_id2.strip('Ethernet')
desc_pc = desc_port
port_mode = 'trunk'
vlans = 'all'
else:
pc_id = None
port_mode = None
vlans = None
desc_port = 'XXX'
desc_pc = desc_port
if pc_id and pc_id not in checked:
N9PortChannel.check_create(n9=self, pc_id=pc_id, desc=desc_pc, mode=port_mode, vlans=vlans)
self.ports[own_port_id].check(pc_id=pc_id, port_name=desc_port, port_mode=port_mode, vlans=vlans)
self.log(60 * '-')
def n9_fix_problem(self, cmd, msg):
from fabric.operations import prompt
import time
self.log('{} do: {}'.format(msg, ' '.join(cmd)))
time.sleep(1) # prevent prompt message interlacing
if prompt('say y if you want to fix it: ') == 'y':
self.n9_cmd(cmd)
def n9_configure_vxlan(self, asr_port):
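        # Set up the routed underlay towards the ASR: enable OSPF/PIM, create two /32
        # loopbacks derived from the node id, and turn asr_port into a routed /30 link in OSPF.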
import re
number_in_node_id = map(int, re.findall(r'\d+', self.id))[0]
lo1_ip = '1.1.1.{0}'.format(number_in_node_id)
lo2_ip = '2.2.2.{0}'.format(number_in_node_id)
router_ospf = '111'
router_area = '0.0.0.0'
eth48_ip = '169.0.{0}.1'.format(number_in_node_id)
self.cmd(['conf t', 'feature ospf'])
self.cmd(['conf t', 'feature pim'])
self.cmd(['conf t', 'interface loopback 1'])
self.cmd(['conf t', 'interface loopback 2'])
self.cmd(['conf t', 'interface loopback 1', 'ip address {0}/32'.format(lo1_ip)])
self.cmd(['conf t', 'interface loopback 1', 'ip router ospf {0} area {1}'.format(router_ospf, router_area)])
self.cmd(['conf t', 'interface loopback 2', 'ip address {0}/32'.format(lo2_ip)])
self.cmd(['conf t', 'interface loopback 2', 'ip router ospf {0} area {1}'.format(router_ospf, router_area)])
self.cmd(['conf t', 'interface ethernet {0}'.format(asr_port), 'no switchport'])
self.cmd(['conf t', 'interface ethernet {0}'.format(asr_port), 'ip address {0}/30'.format(eth48_ip)])
self.cmd(['conf t', 'interface ethernet {0}'.format(asr_port), 'ip router ospf {0} area {1}'.format(router_ospf, router_area)])
def n9_configure_asr1k(self):
self.cmd(['conf t', 'int po{0}'.format(self.get_peer_link_id()), 'shut'])
asr = filter(lambda x: x.is_n9_asr(), self.wires)
self.n9_configure_vxlan(asr[0].get_own_port(self))
def cleanup(self):
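        # Wipe the switch back towards defaults: remove SVIs and port-channels, delete all
        # non-default vlans (in chunks of 64), drop the vpc domain if configured, then reset
        # every Ethernet port up to the highest one in use.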
del_vlan_interfaces = ['no int ' + x for x in self.ports.keys() if x.startswith('Vlan')]
del_port_channels = ['no int ' + x for x in self.port_channels.keys()]
        vlan_ids = sorted(set(self._vlans.keys()) - {'1'})  # sorted list: a set cannot be sliced below
del_vlans = ['no vlan ' + ','.join(vlan_ids[i:i + 64]) for i in range(0, len(vlan_ids), 64)] # need to slice since no more then 64 ids allowed per operation
del_vpc_domain = ['no vpc domain ' + self.vpc_domain.domain_id] if self.vpc_domain.is_configured else []
last_port_id = max(map(lambda name: 0 if 'Ethernet' not in name else int(name.split('/')[-1]), self.ports.keys()))
reset_ports = ['default int e 1/1-' + str(last_port_id)]
self.n9_cmd(['conf t'] + del_vlan_interfaces + del_port_channels + del_vlans + del_vpc_domain + reset_ports, timeout=60)
def n9_show_bgp_l2vpn_evpn(self):
return self.cmd('sh bgp l2vpn evpn')
def n9_show_bgp_sessions(self):
return self.cmd('sh bgp sessions')
def n9_show_bgp_all(self):
return self.cmd('sh bgp all')
def n9_show_running_config(self):
return self.cmd(commands=['sh run'], method='cli_ascii')['result']['msg']
def n9_show_l2route_evpn_mac_all(self):
return self.cmd(' sh l2route evpn mac all')
def n9_show_users(self):
res = self.cmd(['show users'])
if res == 'timeout':
return []
if res['result']:
return res['result']['body']['TABLE_sessions']['ROW_sessions']
else:
return [] # no current session
def n9_show_nve_peers(self):
r = self.cmd('sh nve peers')
return r['result']['body']['TABLE_nve_peers']['ROW_nve_peers'] if r['result'] else {}
def r_collect_config(self):
return self.single_cmd_output(cmd='show running config', ans=self.n9_show_running_config())
|
|
from direct.distributed import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from panda3d.core import *
from panda3d.direct import *
from direct.fsm.FSM import FSM
from toontown.ai.ToonBarrier import *
from toontown.golf import GolfGlobals
import random
import math
class GolfHoleBase:
def __init__(self, canRender = 0):
self.canRender = canRender
self.recording = []
self.aVRecording = []
self.holePositions = []
self.grayCount = 0
self.skyContact = None
self.lastSkyContactPoint = None
self.doingRecording = 0
self.backAmount = 270
self.ballRocket = 0
self.inCount = 0
self.frame = 0
self.onSlick = 0
self.didHoleBreak = 0
return
def loadLevel(self):
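        # Build the hole's ODE collision world from the terrain model: grass and slick
        # trimeshes, the cup and the hard surface, each registered with the collision space,
        # a surface type and a collide id so postStep() can tell what the ball touched.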
tm = self.holeInfo['terrainModel']
self.terrainModel = loader.loadModel(tm)
td = self.holeInfo['physicsData']
if self.canRender:
self.terrainModel.reparentTo(render)
if self.canRender:
self.terrainModel.find('**/softSurface').setBin('ground', 0)
terrainData = self.terrainModel.find('**/softSurface')
grassData = terrainData.findAllMatches('**/grass*')
self.terrainData = []
for index in xrange(grassData.getNumPaths()):
someTerrainData = grassData[index]
terrainDataOde = OdeTriMeshData(someTerrainData)
self.meshDataList.append(terrainDataOde)
terrainGeomOde = OdeTriMeshGeom(self.space, terrainDataOde)
self.geomDataList.append(terrainGeomOde)
terrainGeomOde.setCollideBits(BitMask32(4026531840L))
terrainGeomOde.setCategoryBits(BitMask32(240))
self.space.setSurfaceType(terrainGeomOde, GolfGlobals.GRASS_SURFACE)
self.space.setCollideId(terrainGeomOde, 2)
slickData = terrainData.findAllMatches('**/slick*')
self.terrainData = []
for index in xrange(slickData.getNumPaths()):
someTerrainData = slickData[index]
terrainDataOde = OdeTriMeshData(someTerrainData)
self.meshDataList.append(terrainDataOde)
terrainGeomOde = OdeTriMeshGeom(self.space, terrainDataOde)
self.geomDataList.append(terrainGeomOde)
terrainGeomOde.setCollideBits(BitMask32(4026531840L))
terrainGeomOde.setCategoryBits(BitMask32(240))
self.space.setSurfaceType(terrainGeomOde, GolfGlobals.SLICK_SURFACE)
self.space.setCollideId(terrainGeomOde, GolfGlobals.SLICK_COLLIDE_ID)
cupData = terrainData.find('**/hole*')
cupData = OdeTriMeshData(cupData)
self.meshDataList.append(cupData)
cupGeom = OdeTriMeshGeom(self.space, cupData)
self.geomDataList.append(cupGeom)
cupGeom.setCollideBits(BitMask32(4026531840L))
cupGeom.setCategoryBits(BitMask32(240))
self.space.setSurfaceType(cupGeom, GolfGlobals.HOLE_SURFACE)
self.space.setCollideId(cupGeom, GolfGlobals.HOLE_CUP_COLLIDE_ID)
if self.canRender:
self.golfBarrier = self.terrainModel.find('**/collision1')
if not self.golfBarrier.isEmpty():
golfBarrierCollection = self.terrainModel.findAllMatches('**/collision?')
for i in xrange(golfBarrierCollection.getNumPaths()):
oneBarrier = golfBarrierCollection.getPath(i)
if oneBarrier != self.golfBarrier:
oneBarrier.wrtReparentTo(self.golfBarrier)
self.golfBarrier.hide()
else:
self.notify.warning('Could not find collision1 node ---------')
self.hardSurfaceNodePath = self.terrainModel.find('**/hardSurface')
if self.canRender:
self.terrainModel.find('**/hardSurface').setBin('ground', 0)
self.loadBlockers()
hardData = OdeTriMeshData(self.hardSurfaceNodePath)
self.meshDataList.append(hardData)
hardGeom = OdeTriMeshGeom(self.space, hardData)
self.geomDataList.append(hardGeom)
hardGeom.setCollideBits(BitMask32(4026531840L))
hardGeom.setCategoryBits(BitMask32(240))
self.space.setCollideId(hardGeom, 3)
hardSurface = self.space.getSurfaceType(hardGeom)
self.notify.debug('hardSurface = %s' % hardSurface)
if self.notify.getDebug():
self.notify.debug('self.hardGeom')
hardGeom.write()
self.notify.debug(' -')
self.holeBottomNodePath = self.terrainModel.find('**/holebottom0')
if self.holeBottomNodePath.isEmpty():
self.holeBottomPos = Vec3(*self.holeInfo['holePos'][0])
else:
self.holeBottomPos = self.holeBottomNodePath.getPos()
self.holePositions.append(self.holeBottomPos)
def isBallInHole(self, ball):
retval = False
for holePos in self.holePositions:
displacement = ball.getPosition() - holePos
length = displacement.length()
self.notify.debug('hole %s length=%s' % (holePos, length))
if length <= GolfGlobals.DistanceToBeInHole * 0.5:
retval = True
break
return retval
def createRays(self):
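        # Two rays follow the ball every step: ballRay (OOB_RAY_COLLIDE_ID) checks that the
        # ball is still above valid terrain, and skyRay (SKY_RAY_COLLIDE_ID) records a terrain
        # contact point used to drop the ball back in bounds after an out-of-bounds reset.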
self.notify.debug('createRays')
body = OdeBody(self.world)
self.ballRay = OdeRayGeom(self.space, 50.0)
self.ballRay.setBody(body)
self.ballRay.setOffsetRotation(Mat3(1, 0, 0, 0, -1, 0, 0, 0, -1))
self.ballRay.setOffsetPosition(0, 0, 0.0)
self.ballRay.setCollideBits(BitMask32(16773375))
self.ballRay.setCategoryBits(BitMask32(4278190080L))
self.ballRayBody = body
self.space.setCollideId(self.ballRay, GolfGlobals.OOB_RAY_COLLIDE_ID)
self.rayList.append(self.ballRay)
self.rayList.append(self.ballRayBody)
self.skyRay = OdeRayGeom(self.space, 100.0)
self.skyRay.setCollideBits(BitMask32(240))
self.skyRay.setCategoryBits(BitMask32(0))
self.skyRay.setRotation(Mat3(1, 0, 0, 0, -1, 0, 0, 0, -1))
self.space.setCollideId(self.skyRay, GolfGlobals.SKY_RAY_COLLIDE_ID)
self.rayList.append(self.skyRay)
def delete(self):
self.ballRay = None
self.skyRay = None
self.recording = None
        self.aVRecording = None
self.llv = None
return
def initRecord(self):
del self.recording
self.recording = []
del self.aVRecording
self.aVRecording = []
self.skipFrame = 0.0
self.frame = 0
self.tXYMax = 1.0
self.tZMax = 1.0
self.tXYMin = 0.1
self.tZMin = 0.1
self.skyContact = 1
self.doingRecording = 1
self.ballRocket = 0
self.inCount = 0
self.ballInHoleFrame = 0
self.ballTouchedHoleFrame = 0
self.ballFirstTouchedHoleFrame = 0
self.ballLastTouchedGrass = 0
self.hasReset = 0
self.resetAt = 100000
self.greenIn = 0
for key in self.commonObjectDict:
self.commonObjectDict[key][2].enable()
def checkCommonObjectsNeedPass(self):
for index in self.commonObjectDict:
if self.commonObjectDict[index][1] in [4]:
return 1
return 0
def checkInRadius(self, ball):
smallestDist = None
for index in self.commonObjectDict:
if self.commonObjectDict[index][1] in [4]:
radius = self.commonObjectDict[index][8]
mover = self.commonObjectDict[index][2]
diffX = ball.getPosition()[0] - mover.getPosition()[0]
diffY = ball.getPosition()[1] - mover.getPosition()[1]
diffZ = ball.getPosition()[2] - mover.getPosition()[2]
dist = math.sqrt(diffX * diffX + diffY * diffY + diffZ * diffZ)
if dist < radius:
if not smallestDist or smallestDist[1] > dist:
smallestDist = [radius, dist]
self.notify.debug('Ball Pos %s\nMover Pos %s' % (ball.getPosition(), mover.getPosition()))
return smallestDist
def trackRecordBodyFlight(self, ball, cycleTime, power, startPos, dirX, dirY):
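        # Run the whole stroke off-line: step the simulation ahead to the club-contact frame,
        # apply the hit force, then keep stepping while recording position and angular velocity
        # per frame until the ball settles or the frame limits trip. Returns the final position.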
self.notify.debug('trackRecordBodyFlight')
self.ballInHoleFrame = 0
self.ballTouchedHoleFrame = 0
self.ballFirstTouchedHoleFrame = 0
self.ballLastTouchedGrass = 0
startTime = globalClock.getRealTime()
self.notify.debug('start position %s' % startPos)
self.swingTime = cycleTime
frameCount = 0
lift = 0
startTime = GolfGlobals.BALL_CONTACT_FRAME / 24
startFrame = int(startTime * self.FPS)
for frame in xrange(int(startFrame)):
self.simulate()
self.setTimeIntoCycle(self.swingTime + float(frameCount) * self.DTAStep)
frameCount += 1
forceMove = 1500
if power > 50:
lift = 0
self.didHoleBreak = 0
ball.setPosition(startPos)
ball.setLinearVel(0.0, 0.0, 0.0)
ball.setAngularVel(0.0, 0.0, 0.0)
ball.enable()
self.preStep()
self.simulate()
self.postStep()
ball.enable()
ball.addForce(Vec3(dirX * forceMove * power / 100.0, dirY * forceMove * power / 100.0, lift))
self.initRecord()
self.llv = None
self.lastSkyContactPoint = None
ran = 0
self.record(ball)
self.comObjNeedPass = self.checkCommonObjectsNeedPass()
self.notify.debug('self.comObjNeedPass %s' % self.comObjNeedPass)
firstDisabled = -1
reEnabled = 0
lastFrameEnabled = 0
checkFrames = self.FPS * (self.timingCycleLength + 1.0)
hasPrinted = 0
while ball.isEnabled() and len(self.recording) < 2100 or self.comObjNeedPass or len(self.recording) < 10:
ran = 1
if len(self.recording) > 2100 and not hasPrinted:
self.notify.debug('recording too long %s' % len(self.recording))
hasPrinted = 1
ball.disable()
self.preStep()
self.simulate()
self.setTimeIntoCycle(self.swingTime + float(frameCount) * self.DTAStep)
frameCount += 1
self.postStep()
self.record(ball)
if self.comObjNeedPass:
if firstDisabled == -1 and not ball.isEnabled():
firstDisabled = self.frame
self.notify.debug('firstDisabled %s' % firstDisabled)
check = self.checkInRadius(ball)
if check == None:
self.comObjNeedPass = 0
self.notify.debug('out radius')
else:
self.notify.debug('in radius %s dist %s' % (check[0], check[1]))
elif ball.isEnabled() and firstDisabled != -1 and not reEnabled:
reEnabled = self.frame
self.notify.debug('reEnabled %s' % reEnabled)
if reEnabled:
if self.frame > reEnabled + checkFrames:
self.comObjNeedPass = 0
self.notify.debug('renable limit passed')
elif self.frame > 2100 + checkFrames:
self.comObjNeedPass = 0
print 'recording limit passed comObj'
if ball.isEnabled():
lastFrameEnabled = self.frame
self.notify.debug('lastFrameEnabled %s' % lastFrameEnabled)
if lastFrameEnabled < 3:
lastFrameEnabled = 3
self.record(ball)
self.notify.debug('Frames %s' % self.frame)
midTime = globalClock.getRealTime()
self.recording = self.recording[:lastFrameEnabled]
self.aVRecording = self.aVRecording[:lastFrameEnabled]
self.frame = lastFrameEnabled
self.processRecording()
self.processAVRecording()
self.notify.debug('Recording End time %s cycle %s len %s avLen %s' % (self.timingSimTime,
self.getCycleTime(),
len(self.recording),
len(self.aVRecording)))
length = len(self.recording) - 1
x = self.recording[length][1]
y = self.recording[length][2]
z = self.recording[length][3]
endTime = globalClock.getRealTime()
diffTime = endTime - startTime
self.doingRecording = 0
fpsTime = self.frame / diffTime
self.notify.debug('Time Start %s Mid %s End %s Diff %s Fps %s frames %s' % (startTime,
midTime,
endTime,
diffTime,
fpsTime,
self.frame))
return Vec3(x, y, z)
def record(self, ball):
self.recording.append((self.frame,
ball.getPosition()[0],
ball.getPosition()[1],
ball.getPosition()[2]))
self.aVRecording.append((self.frame,
ball.getAngularVel()[0],
ball.getAngularVel()[1],
ball.getAngularVel()[2]))
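        # Every 13th frame (after frame 50) check whether the ball has effectively stopped:
        # if it has barely moved relative to two earlier frames and is neither out of bounds
        # nor on a slick surface, disable it so the recording can end.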
if self.frame > 50 and not self.frame % 13:
curFrame = self.recording[self.frame]
pastFrame5 = self.recording[self.frame - 11]
pastFrame10 = self.recording[self.frame - 34]
currPosA = Vec3(curFrame[1], curFrame[2], curFrame[3])
past5PosA = Vec3(pastFrame5[1], pastFrame5[2], pastFrame5[3])
past10PosA = Vec3(pastFrame10[1], pastFrame10[2], pastFrame10[3])
displacement1 = currPosA - past5PosA
displacement2 = currPosA - past10PosA
if displacement1.lengthSquared() < 0.002 and displacement2.lengthSquared() < 0.002 and not self.grayCount and not self.onSlick:
ball.disable()
self.frame += 1
def preStep(self):
if hasattr(self, 'ballRay'):
bp = self.curGolfBall().getPosition()
self.ballRayBody.setPosition(bp[0], bp[1], bp[2])
self.skyRay.setPosition(bp[0], bp[1], 50.0)
def getOrderedContacts(self, entry):
c0 = self.space.getCollideId(entry.getGeom1())
c1 = self.space.getCollideId(entry.getGeom2())
if c0 > c1:
return (c1, c0)
else:
return (c0, c1)
def postStep(self):
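        # Walk this step's collision entries: track whether the out-of-bounds ray still hits
        # terrain, remember sky/ball ray contact points for repositioning, record hole and
        # grass contact frames while recording, then handle out-of-bounds resets and braking.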
if self.canRender:
self.translucentLastFrame = self.translucentCurFrame[:]
self.translucentCurFrame = []
self.onSlick = 0
rayCount = 0
skyRayHitPos = None
ballRayHitPos = None
bp = self.curGolfBall().getPosition()
for entry in self.colEntries:
c0, c1 = self.getOrderedContacts(entry)
x, y, z = entry.getContactPoint(0)
if c0 == GolfGlobals.OOB_RAY_COLLIDE_ID or c1 == GolfGlobals.OOB_RAY_COLLIDE_ID:
rayCount += 1
if self.canRender:
if self.currentGolfer:
self.ballShadowDict[self.currentGolfer].setPos(x, y, z + 0.1)
if c1 == GolfGlobals.GRASS_COLLIDE_ID or c1 == GolfGlobals.HARD_COLLIDE_ID:
if self.curGolfBall().getPosition()[2] < z + 0.2:
ballRayHitPos = Vec3(x, y, z)
if c0 == GolfGlobals.OOB_RAY_COLLIDE_ID and c1 == GolfGlobals.SLICK_COLLIDE_ID:
self.onSlick = 1
elif c0 == GolfGlobals.OOB_RAY_COLLIDE_ID and c1 == GolfGlobals.HARD_COLLIDE_ID:
self.onSlick = 1
if c0 == GolfGlobals.GRASS_COLLIDE_ID and c1 == GolfGlobals.SKY_RAY_COLLIDE_ID:
self.lastSkyContactPoint = (x, y, z)
if self.curGolfBall().getPosition()[2] < z + 0.2 and rayCount == 0:
if self.skyContact in [1, 2]:
skyRayHitPos = Vec3(x, y, z)
self.skyContact += 1
if self.doingRecording:
if c0 == GolfGlobals.OOB_RAY_COLLIDE_ID or c1 == GolfGlobals.OOB_RAY_COLLIDE_ID:
rayCount += 1
if c1 == GolfGlobals.GRASS_COLLIDE_ID:
self.greenIn = self.frame
self.llv = self.curGolfBall().getLinearVel()
elif GolfGlobals.BALL_COLLIDE_ID in [c0, c1] and GolfGlobals.HOLE_CUP_COLLIDE_ID in [c0, c1]:
self.ballTouchedHoleFrame = self.frame
ballUndersideZ = self.curGolfBall().getPosition()[2] - 0.05
if z < ballUndersideZ:
if not self.ballInHoleFrame:
self.ballInHoleFrame = self.frame
if self.ballFirstTouchedHoleFrame < self.ballLastTouchedGrass:
self.ballFirstTouchedHoleFrame = self.frame
if self.isBallInHole(self.curGolfBall()) and self.didHoleBreak == 0:
self.comObjNeedPass = 0
ballLV = self.curGolfBall().getLinearVel()
ballAV = self.curGolfBall().getAngularVel()
self.curGolfBall().setLinearVel(0.5 * ballLV[0], 0.5 * ballLV[1], 0.5 * ballLV[2])
self.curGolfBall().setAngularVel(0.5 * ballAV[0], 0.5 * ballAV[1], 0.5 * ballAV[2])
self.notify.debug('BALL IN THE HOLE!!! FOO!')
self.didHoleBreak = 1
return
elif GolfGlobals.BALL_COLLIDE_ID in [c0, c1] and GolfGlobals.GRASS_COLLIDE_ID in [c0, c1]:
if self.ballInHoleFrame:
self.ballInHoleFrame = 0
self.notify.debug('setting ballInHoleFrame=0')
self.ballLastTouchedGrass = self.frame
elif self.canRender:
if c0 == GolfGlobals.TOON_RAY_COLLIDE_ID or c1 == GolfGlobals.TOON_RAY_COLLIDE_ID:
self.toonRayCollisionCallback(x, y, z)
if GolfGlobals.CAMERA_RAY_COLLIDE_ID in [c0, c1] and GolfGlobals.WINDMILL_BASE_COLLIDE_ID in [c0, c1]:
self.translucentCurFrame.append(self.windmillFanNodePath)
self.translucentCurFrame.append(self.windmillBaseNodePath)
if GolfGlobals.BALL_COLLIDE_ID in [c0, c1] and GolfGlobals.GRASS_COLLIDE_ID not in [c0, c1]:
self.handleBallHitNonGrass(c0, c1)
if not self.curGolfBall().isEnabled():
return
if rayCount == 0:
self.notify.debug('out of bounds detected!')
self.grayCount += 1
self.outCommon = self.getCommonObjectData()
self.inCount = 0
if skyRayHitPos:
self.curGolfBall().setPosition(skyRayHitPos[0], skyRayHitPos[1], skyRayHitPos[2] + 0.27)
self.notify.debug('SKY RAY ADJUST?')
else:
if self.grayCount > 1:
self.notify.debug('Back in bounds')
self.grayCount = 0
self.inCount += 1
if ballRayHitPos:
self.curGolfBall().setPosition(ballRayHitPos[0], ballRayHitPos[1], ballRayHitPos[2] + 0.245)
ballRayHitPos = None
if self.doingRecording:
self.notify.debug('BALL RAY ADJUST!')
self.notify.debug('%s' % self.curGolfBall().getLinearVel())
if self.ballRocket > 0 and self.inCount > 1:
self.ballRocket -= 1
rocketVel = self.curGolfBall().getLinearVel()
self.curGolfBall().setLinearVel(2.0 * rocketVel[0], 2.0 * rocketVel[1], 2.0 * rocketVel[2])
self.notify.debug('ROCKET!!!!')
if self.grayCount > self.backAmount and self.doingRecording:
if self.greenIn > 2:
self.greenIn -= 2
if self.greenIn > self.resetAt:
self.greenIn = self.resetAt - 10
if self.greenIn < 0 or self.hasReset > 3:
self.greenIn = 0
self.hasReset += 1
self.notify.debug('BALL RESET frame %s greenIn %s resetAt %s' % (self.frame, self.greenIn, self.resetAt))
self.useCommonObjectData(self.outCommon)
self.curGolfBall().setPosition(self.recording[self.greenIn][1], self.recording[self.greenIn][2], self.recording[self.greenIn][3] + 0.27)
self.curGolfBall().setAngularVel(0, 0, 0)
if self.hasReset < 3 and self.llv:
self.ballRocket += 1
self.notify.debug(' BRAKE!!!!')
self.curGolfBall().setLinearVel(0.5 * self.llv[0], 0.5 * self.llv[1], 0.5 * self.llv[2])
else:
self.notify.debug('back disable %s' % self.frame)
if self.lastSkyContactPoint:
self.curGolfBall().setPosition(self.lastSkyContactPoint[0], self.lastSkyContactPoint[1], self.lastSkyContactPoint[2] + 0.27)
self.curGolfBall().setLinearVel(0, 0, 0)
self.curGolfBall().disable()
self.recording = self.recording[:self.greenIn]
self.aVRecording = self.aVRecording[:self.greenIn]
self.frame = self.greenIn
self.resetAt = self.greenIn
self.grayCount = 0
if self.ballFirstTouchedHoleFrame > self.frame:
self.notify.debug('reseting first touched hole, self.frame=%d self.ballFirstTouchedHoleFrame=%d' % (self.frame, self.ballFirstTouchedHoleFrame))
self.ballFirstTouchedHoleFrame = 0
if self.ballLastTouchedGrass > self.frame:
self.ballLastTouchedGrass = 0
return
def processRecording(self, errorMult = 1.0):
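        # Thin out the recorded path: drop any frame that can be linearly interpolated from
        # its neighbours within tolerance, recursing (with a looser tolerance while the
        # recording is still longer than 120 frames) until it is compact enough.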
self.notify.debug('processRecording')
lastFrame = self.recording[len(self.recording) - 1][0]
countRemovals = 0
for frame in self.recording:
if frame[0] == 0 or frame[0] == lastFrame:
pass
else:
index = self.recording.index(frame)
prevFrame = self.recording[index - 1]
nextFrame = self.recording[index + 1]
if self.predict(frame, prevFrame, nextFrame, errorMult):
self.recording.remove(frame)
countRemovals += 1
if countRemovals > 5:
self.processRecording()
elif len(self.recording) > 120:
self.processRecording(errorMult * 1.25)
else:
for frame in self.recording:
pass
def processAVRecording(self, errorMult = 1.0, trials = 0):
self.notify.debug('processAVRecording')
lastFrame = self.recording[len(self.recording) - 1][0]
countRemovals = 0
countTrials = trials
for frame in self.aVRecording:
if frame[0] == 0 or frame[0] == lastFrame:
pass
else:
index = self.aVRecording.index(frame)
prevFrame = self.aVRecording[index - 1]
nextFrame = self.aVRecording[index + 1]
if self.predictAV(frame, prevFrame, nextFrame, errorMult):
self.aVRecording.remove(frame)
countRemovals += 1
else:
countTrials += 1
if countRemovals > 5:
self.processAVRecording(errorMult, countTrials)
elif len(self.aVRecording) > 80:
self.processAVRecording(errorMult * 1.25, countTrials)
else:
for frame in self.aVRecording:
pass
def predict(self, frame, sourceFrame, destFrame, errorMult = 1.0):
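        # Linearly interpolate where the ball should be at frame[0] from the two neighbouring
        # frames; return 1 (frame is redundant and can be dropped) when the real position is
        # within the XY/Z tolerances, 0 otherwise. predictAV does the same for angular velocity.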
tXY = 0.05 * errorMult
tZ = 0.05 * errorMult
projLength = destFrame[0] - sourceFrame[0]
projPen = destFrame[0] - frame[0]
propSource = float(projPen) / float(projLength)
propDest = 1.0 - propSource
projX = sourceFrame[1] * propSource + destFrame[1] * propDest
projY = sourceFrame[2] * propSource + destFrame[2] * propDest
projZ = sourceFrame[3] * propSource + destFrame[3] * propDest
varX = abs(projX - frame[1])
varY = abs(projY - frame[2])
varZ = abs(projZ - frame[3])
if varX > tXY or varY > tXY or varZ > tZ:
return 0
else:
return 1
def predictAV(self, frame, sourceFrame, destFrame, errorMult = 1.0):
tXYZ = 1.5 * errorMult
projLength = destFrame[0] - sourceFrame[0]
projPen = destFrame[0] - frame[0]
propSource = float(projPen) / float(projLength)
propDest = 1.0 - propSource
projX = sourceFrame[1] * propSource + destFrame[1] * propDest
projY = sourceFrame[2] * propSource + destFrame[2] * propDest
projZ = sourceFrame[3] * propSource + destFrame[3] * propDest
varX = abs(projX - frame[1])
varY = abs(projY - frame[2])
varZ = abs(projZ - frame[3])
if varX > tXYZ or varY > tXYZ or varZ > tXYZ:
return 0
else:
return 1
def handleBallHitNonGrass(self, c0, c1):
pass
|
|
# Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from cryptography import fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from barbican.model import models
from barbican.plugin.crypto import base as plugin
from barbican.plugin.crypto import simple_crypto as simple
from barbican.tests import utils
class WhenTestingSimpleCryptoPlugin(utils.BaseTestCase):
def setUp(self):
super(WhenTestingSimpleCryptoPlugin, self).setUp()
self.plugin = simple.SimpleCryptoPlugin()
def _get_mocked_kek_meta_dto(self):
# For SimpleCryptoPlugin, per-project KEKs are stored in
# kek_meta_dto.plugin_meta. SimpleCryptoPlugin does a get-or-create
# on the plugin_meta field, so plugin_meta should be None initially.
kek_meta_dto = plugin.KEKMetaDTO(mock.MagicMock())
kek_meta_dto.plugin_meta = None
return self.plugin.bind_kek_metadata(kek_meta_dto)
def test_encrypt_unicode_raises_value_error(self):
unencrypted = u'unicode_beer\U0001F37A'
encrypt_dto = plugin.EncryptDTO(unencrypted)
secret = mock.MagicMock()
secret.mime_type = 'text/plain'
kek_meta_dto = self._get_mocked_kek_meta_dto()
self.assertRaises(
ValueError,
self.plugin.encrypt,
encrypt_dto,
kek_meta_dto,
mock.MagicMock(),
)
def test_encrypt_with_unicode_kek_must_pass(self):
"""Test plan:
Generate a kek
Encrypt with master kek
Convert to unicode
call plugin.encrypt on unencrypted
decrypt response cypher_text
Compare with unencrypted
"""
project_kek = fernet.Fernet.generate_key()
encryptor = fernet.Fernet(self.plugin.master_kek)
ENC_project_kek = encryptor.encrypt(project_kek)
        UENC_project_kek = ENC_project_kek.decode('utf-8')
kek_meta_dto = self._get_mocked_kek_meta_dto()
kek_meta_dto.plugin_meta = UENC_project_kek
unencrypted = b'PlainTextSecret'
encrypt_dto = plugin.EncryptDTO(unencrypted)
response_dto = self.plugin.encrypt(encrypt_dto,
kek_meta_dto,
mock.MagicMock())
project_encryptor = fernet.Fernet(project_kek)
decrypted = project_encryptor.decrypt(response_dto.cypher_text)
self.assertEqual(unencrypted, decrypted)
def test_decrypt_kek_not_created(self):
kek_meta_dto = mock.MagicMock()
kek_meta_dto.plugin_meta = None
self.assertRaises(
ValueError,
self.plugin.decrypt,
mock.MagicMock(),
kek_meta_dto,
mock.MagicMock(),
mock.MagicMock(),
)
def test_byte_string_encryption(self):
unencrypted = b'some_secret'
encrypt_dto = plugin.EncryptDTO(unencrypted)
kek_meta_dto = self._get_mocked_kek_meta_dto()
response_dto = self.plugin.encrypt(encrypt_dto,
kek_meta_dto,
mock.MagicMock())
decrypt_dto = plugin.DecryptDTO(response_dto.cypher_text)
decrypted = self.plugin.decrypt(decrypt_dto, kek_meta_dto,
response_dto.kek_meta_extended,
mock.MagicMock())
self.assertEqual(unencrypted, decrypted)
def test_random_bytes_encryption(self):
unencrypted = os.urandom(10)
encrypt_dto = plugin.EncryptDTO(unencrypted)
kek_meta_dto = self._get_mocked_kek_meta_dto()
response_dto = self.plugin.encrypt(encrypt_dto,
kek_meta_dto,
mock.MagicMock())
decrypt_dto = plugin.DecryptDTO(response_dto.cypher_text)
decrypted = self.plugin.decrypt(decrypt_dto, kek_meta_dto,
response_dto.kek_meta_extended,
mock.MagicMock())
self.assertEqual(unencrypted, decrypted)
def test_generate_256_bit_key(self):
secret = models.Secret()
secret.bit_length = 256
secret.algorithm = "AES"
kek_meta_dto = self._get_mocked_kek_meta_dto()
generate_dto = plugin.GenerateDTO(
secret.algorithm,
secret.bit_length,
secret.mode, None)
response_dto = self.plugin.generate_symmetric(
generate_dto,
kek_meta_dto,
mock.MagicMock()
)
decrypt_dto = plugin.DecryptDTO(response_dto.cypher_text)
key = self.plugin.decrypt(decrypt_dto, kek_meta_dto,
response_dto.kek_meta_extended,
mock.MagicMock())
self.assertEqual(32, len(key))
def test_generate_192_bit_key(self):
secret = models.Secret()
secret.bit_length = 192
secret.algorithm = "AES"
kek_meta_dto = self._get_mocked_kek_meta_dto()
generate_dto = plugin.GenerateDTO(
secret.algorithm,
secret.bit_length,
None, None)
response_dto = self.plugin.generate_symmetric(
generate_dto,
kek_meta_dto,
mock.MagicMock()
)
decrypt_dto = plugin.DecryptDTO(response_dto.cypher_text)
key = self.plugin.decrypt(decrypt_dto, kek_meta_dto,
response_dto.kek_meta_extended,
mock.MagicMock())
self.assertEqual(24, len(key))
def test_generate_128_bit_key(self):
secret = models.Secret()
secret.bit_length = 128
secret.algorithm = "AES"
kek_meta_dto = self._get_mocked_kek_meta_dto()
generate_dto = plugin.GenerateDTO(
secret.algorithm,
secret.bit_length,
None, None)
response_dto = self.plugin.generate_symmetric(
generate_dto,
kek_meta_dto,
mock.MagicMock()
)
decrypt_dto = plugin.DecryptDTO(response_dto.cypher_text)
key = self.plugin.decrypt(decrypt_dto, kek_meta_dto,
response_dto.kek_meta_extended,
mock.MagicMock())
self.assertEqual(16, len(key))
def test_supports_encrypt_decrypt(self):
self.assertTrue(
self.plugin.supports(plugin.PluginSupportTypes.ENCRYPT_DECRYPT)
)
def test_supports_symmetric_key_generation(self):
self.assertTrue(
self.plugin.supports(
plugin.PluginSupportTypes.SYMMETRIC_KEY_GENERATION, 'AES', 64)
)
self.assertFalse(
self.plugin.supports(
plugin.PluginSupportTypes.SYMMETRIC_KEY_GENERATION, 'AES')
)
self.assertTrue(
self.plugin.supports(
plugin.PluginSupportTypes.SYMMETRIC_KEY_GENERATION,
'hmacsha512', 128)
)
self.assertFalse(
self.plugin.supports(
plugin.PluginSupportTypes.SYMMETRIC_KEY_GENERATION,
'hmacsha512', 12)
)
self.assertFalse(
self.plugin.supports(
plugin.PluginSupportTypes.SYMMETRIC_KEY_GENERATION,
'Camillia', 128)
)
def test_does_not_support_unknown_type(self):
self.assertFalse(
self.plugin.supports("SOMETHING_RANDOM")
)
def test_bind_kek_metadata(self):
kek_metadata_dto = mock.MagicMock()
kek_metadata_dto = self.plugin.bind_kek_metadata(kek_metadata_dto)
self.assertEqual('aes', kek_metadata_dto.algorithm)
self.assertEqual(128, kek_metadata_dto.bit_length)
self.assertEqual('cbc', kek_metadata_dto.mode)
def test_supports_asymmetric_key_generation(self):
self.assertTrue(
self.plugin.supports(
plugin.PluginSupportTypes.ASYMMETRIC_KEY_GENERATION,
'DSA', 1024)
)
self.assertTrue(
self.plugin.supports(
plugin.PluginSupportTypes.ASYMMETRIC_KEY_GENERATION,
"RSA", 1024)
)
self.assertFalse(
self.plugin.supports(
plugin.PluginSupportTypes.ASYMMETRIC_KEY_GENERATION,
"DSA", 512)
)
self.assertFalse(
self.plugin.supports(
plugin.PluginSupportTypes.ASYMMETRIC_KEY_GENERATION,
"RSA", 64)
)
def test_generate_asymmetric_1024_bit_key(self):
generate_dto = plugin.GenerateDTO('rsa', 1024, None, None)
kek_meta_dto = self._get_mocked_kek_meta_dto()
private_dto, public_dto, passwd_dto = self.plugin.generate_asymmetric(
generate_dto, kek_meta_dto, mock.MagicMock())
decrypt_dto = plugin.DecryptDTO(private_dto.cypher_text)
private_dto = self.plugin.decrypt(decrypt_dto,
kek_meta_dto,
private_dto.kek_meta_extended,
mock.MagicMock())
decrypt_dto = plugin.DecryptDTO(public_dto.cypher_text)
public_dto = self.plugin.decrypt(decrypt_dto,
kek_meta_dto,
public_dto.kek_meta_extended,
mock.MagicMock())
# check we can reload the private and public keys
private_key = serialization.load_pem_private_key(
data=private_dto,
password=None,
backend=default_backend()
)
public_key = serialization.load_pem_public_key(
data=public_dto,
backend=default_backend()
)
self.assertEqual(1024, private_key.key_size)
self.assertEqual(1024, public_key.key_size)
public_key = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.PKCS1
)
# get the public key from the private key we recovered to compare
recovered_key = private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.PKCS1
)
self.assertTrue(public_key == recovered_key)
def test_generate_1024_bit_RSA_key_with_passphrase(self):
generate_dto = plugin.GenerateDTO('rsa', 1024, None, 'changeme')
kek_meta_dto = self._get_mocked_kek_meta_dto()
private_dto, public_dto, passwd_dto = self.plugin.generate_asymmetric(
generate_dto,
kek_meta_dto,
mock.MagicMock()
)
decrypt_dto = plugin.DecryptDTO(private_dto.cypher_text)
private_dto = self.plugin.decrypt(decrypt_dto,
kek_meta_dto,
private_dto.kek_meta_extended,
mock.MagicMock())
decrypt_dto = plugin.DecryptDTO(public_dto.cypher_text)
public_dto = self.plugin.decrypt(decrypt_dto,
kek_meta_dto,
public_dto.kek_meta_extended,
mock.MagicMock())
# check we can reload the private and public keys
private_key = serialization.load_pem_private_key(
data=private_dto,
password='changeme'.encode(),
backend=default_backend()
)
public_key = serialization.load_pem_public_key(
data=public_dto,
backend=default_backend()
)
self.assertEqual(1024, private_key.key_size)
self.assertEqual(1024, public_key.key_size)
public_key = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.PKCS1
)
# get the public key from the private key we recovered to compare
recovered_key = private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.PKCS1
)
self.assertTrue(public_key == recovered_key)
def test_generate_1024_bit_DSA_key_with_passphrase(self):
generate_dto = plugin.GenerateDTO('dsa', 1024, None, 'changeme')
kek_meta_dto = self._get_mocked_kek_meta_dto()
private_dto, public_dto, passwd_dto = self.plugin.generate_asymmetric(
generate_dto,
kek_meta_dto,
mock.MagicMock()
)
decrypt_dto = plugin.DecryptDTO(private_dto.cypher_text)
private_dto = self.plugin.decrypt(decrypt_dto,
kek_meta_dto,
private_dto.kek_meta_extended,
mock.MagicMock())
decrypt_dto = plugin.DecryptDTO(public_dto.cypher_text)
public_dto = self.plugin.decrypt(decrypt_dto,
kek_meta_dto,
public_dto.kek_meta_extended,
mock.MagicMock())
# check we can reload the private and public keys
private_key = serialization.load_der_private_key(
data=private_dto,
password='changeme'.encode(),
backend=default_backend()
)
public_key = serialization.load_der_public_key(
data=public_dto,
backend=default_backend()
)
self.assertEqual(1024, private_key.key_size)
self.assertEqual(1024, public_key.key_size)
public_key = public_key.public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
# get the public key from the private key we recovered to compare
recovered_key = private_key.public_key().public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.assertTrue(public_key == recovered_key)
def test_generate_1024_DSA_key_in_pem_and_reconstruct_key_der(self):
generate_dto = plugin.GenerateDTO('dsa', 1024, None, None)
kek_meta_dto = self._get_mocked_kek_meta_dto()
private_dto, public_dto, passwd_dto = self.plugin.generate_asymmetric(
generate_dto,
kek_meta_dto,
mock.MagicMock()
)
decrypt_dto = plugin.DecryptDTO(private_dto.cypher_text)
private_dto = self.plugin.decrypt(decrypt_dto,
kek_meta_dto,
private_dto.kek_meta_extended,
mock.MagicMock())
private_key = serialization.load_der_private_key(
data=private_dto,
password=None,
backend=default_backend()
)
self.assertEqual(1024, private_key.key_size)
def test_generate_128_bit_hmac_key(self):
secret = models.Secret()
secret.bit_length = 128
secret.algorithm = "hmacsha256"
kek_meta_dto = self._get_mocked_kek_meta_dto()
generate_dto = plugin.GenerateDTO(
secret.algorithm,
secret.bit_length,
None, None)
response_dto = self.plugin.generate_symmetric(
generate_dto,
kek_meta_dto,
mock.MagicMock()
)
decrypt_dto = plugin.DecryptDTO(response_dto.cypher_text)
key = self.plugin.decrypt(decrypt_dto, kek_meta_dto,
response_dto.kek_meta_extended,
mock.MagicMock())
self.assertEqual(16, len(key))
def test_get_plugin_name(self):
self.assertIsNotNone(self.plugin.get_plugin_name())
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware vCenter platform.
"""
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_vmware import api
from oslo_vmware import exceptions as vexc
from oslo_vmware import pbm
from oslo_vmware import vim
from oslo_vmware import vim_util
from nova.compute import task_states
from nova.compute import vm_states
from nova import exception
from nova import utils
from nova.i18n import _, _LI, _LE, _LW
from nova import objects
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
from nova.virt.vmwareapi import vim_util as nova_vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('host_ip',
help='Hostname or IP address for connection to VMware '
'vCenter host.'),
cfg.IntOpt('host_port',
default=443,
min=1,
max=65535,
help='Port for connection to VMware vCenter host.'),
cfg.StrOpt('host_username',
help='Username for connection to VMware vCenter host.'),
cfg.StrOpt('host_password',
help='Password for connection to VMware vCenter host.',
secret=True),
cfg.StrOpt('ca_file',
help='Specify a CA bundle file to use in verifying the '
'vCenter server certificate.'),
cfg.BoolOpt('insecure',
default=False,
help='If true, the vCenter server certificate is not '
'verified. If false, then the default CA truststore is '
'used for verification. This option is ignored if '
'"ca_file" is set.'),
cfg.StrOpt('cluster_name',
help='Name of a VMware Cluster ComputeResource.'),
cfg.StrOpt('datastore_regex',
help='Regex to match the name of a datastore.'),
cfg.FloatOpt('task_poll_interval',
default=0.5,
help='The interval used for polling of remote tasks.'),
cfg.IntOpt('api_retry_count',
default=10,
help='The number of times we retry on failures, e.g., '
'socket error, etc.'),
cfg.IntOpt('vnc_port',
default=5900,
min=1,
max=65535,
help='VNC starting port'),
cfg.IntOpt('vnc_port_total',
default=10000,
help='Total number of VNC ports'),
cfg.BoolOpt('use_linked_clone',
default=True,
help='Whether to use linked clone'),
cfg.StrOpt('wsdl_location',
help='Optional VIM Service WSDL Location '
                    'e.g. http://<server>/vimService.wsdl. '
'Optional over-ride to default location for bug '
'work-arounds')
]
spbm_opts = [
cfg.BoolOpt('pbm_enabled',
default=False,
help='The PBM status.'),
cfg.StrOpt('pbm_wsdl_location',
help='PBM service WSDL file location URL. '
'e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl '
'Not setting this will disable storage policy based '
'placement of instances.'),
cfg.StrOpt('pbm_default_policy',
help='The PBM default policy. If pbm_wsdl_location is set and '
'there is no defined storage policy for the specific '
'request then this policy will be used.'),
]
CONF = cfg.CONF
CONF.register_opts(vmwareapi_opts, 'vmware')
CONF.register_opts(spbm_opts, 'vmware')
TIME_BETWEEN_API_CALL_RETRIES = 1.0
class VMwareVCDriver(driver.ComputeDriver):
"""The VC host connection object."""
capabilities = {
"has_imagecache": True,
"supports_recreate": False,
"supports_migrate_to_same_host": True
}
# Legacy nodename is of the form: <mo id>(<cluster name>)
# e.g. domain-26(TestCluster)
# We assume <mo id> consists of alphanumeric, _ and -.
# We assume cluster name is everything between the first ( and the last ).
# We pull out <mo id> for re-use.
    LEGACY_NODENAME = re.compile(r'([\w-]+)\(.+\)')
# The vCenter driver includes API that acts on ESX hosts or groups
# of ESX hosts in clusters or non-cluster logical-groupings.
#
# vCenter is not a hypervisor itself, it works with multiple
# hypervisor host machines and their guests. This fact can
# subtly alter how vSphere and OpenStack interoperate.
def __init__(self, virtapi, scheme="https"):
super(VMwareVCDriver, self).__init__(virtapi)
if (CONF.vmware.host_ip is None or
CONF.vmware.host_username is None or
CONF.vmware.host_password is None):
raise Exception(_("Must specify host_ip, host_username and "
"host_password to use vmwareapi.VMwareVCDriver"))
self._datastore_regex = None
if CONF.vmware.datastore_regex:
try:
self._datastore_regex = re.compile(CONF.vmware.datastore_regex)
except re.error:
raise exception.InvalidInput(reason=
_("Invalid Regular Expression %s")
% CONF.vmware.datastore_regex)
self._session = VMwareAPISession(scheme=scheme)
self._check_min_version()
# Update the PBM location if necessary
if CONF.vmware.pbm_enabled:
self._update_pbm_location()
self._validate_configuration()
self._cluster_name = CONF.vmware.cluster_name
self._cluster_ref = vm_util.get_cluster_ref_by_name(self._session,
self._cluster_name)
if self._cluster_ref is None:
raise exception.NotFound(_("The specified cluster '%s' was not "
"found in vCenter")
% self._cluster_name)
self._vcenter_uuid = self._get_vcenter_uuid()
self._nodename = self._create_nodename(self._cluster_ref.value)
self._volumeops = volumeops.VMwareVolumeOps(self._session,
self._cluster_ref)
self._vmops = vmops.VMwareVMOps(self._session,
virtapi,
self._volumeops,
self._cluster_ref,
datastore_regex=self._datastore_regex)
self._vc_state = host.VCState(self._session,
self._nodename,
self._cluster_ref,
self._datastore_regex)
# Register the OpenStack extension
self._register_openstack_extension()
def _check_min_version(self):
min_version = utils.convert_version_to_int(constants.MIN_VC_VERSION)
vc_version = vim_util.get_vc_version(self._session)
LOG.info(_LI("VMware vCenter version: %s"), vc_version)
if min_version > utils.convert_version_to_int(vc_version):
# TODO(garyk): enforce this from M
LOG.warning(_LW('Running Nova with a VMware vCenter version less '
'than %(version)s is deprecated. The required '
'minimum version of vCenter will be raised to '
'%(version)s in the 13.0.0 release.'),
{'version': constants.MIN_VC_VERSION})
@property
def need_legacy_block_device_info(self):
return False
def _update_pbm_location(self):
if CONF.vmware.pbm_wsdl_location:
pbm_wsdl_loc = CONF.vmware.pbm_wsdl_location
else:
version = vim_util.get_vc_version(self._session)
pbm_wsdl_loc = pbm.get_pbm_wsdl_location(version)
self._session.pbm_wsdl_loc_set(pbm_wsdl_loc)
def _validate_configuration(self):
if CONF.vmware.pbm_enabled:
if not CONF.vmware.pbm_default_policy:
raise error_util.PbmDefaultPolicyUnspecified()
if not pbm.get_profile_id_by_name(
self._session,
CONF.vmware.pbm_default_policy):
raise error_util.PbmDefaultPolicyDoesNotExist()
if CONF.vmware.datastore_regex:
LOG.warning(_LW(
"datastore_regex is ignored when PBM is enabled"))
self._datastore_regex = None
def init_host(self, host):
vim = self._session.vim
if vim is None:
self._session._create_session()
def cleanup_host(self, host):
self._session.logout()
def _register_openstack_extension(self):
# Register an 'OpenStack' extension in vCenter
LOG.debug('Registering extension %s with vCenter',
constants.EXTENSION_KEY)
os_extension = self._session._call_method(vim_util, 'find_extension',
constants.EXTENSION_KEY)
if os_extension is None:
LOG.debug('Extension does not exist. Registering type %s.',
constants.EXTENSION_TYPE_INSTANCE)
self._session._call_method(vim_util, 'register_extension',
constants.EXTENSION_KEY,
constants.EXTENSION_TYPE_INSTANCE)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
state = vm_util.get_vm_state(self._session, instance)
ignored_states = ['poweredon', 'suspended']
if state.lower() in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self.reboot(context, instance, network_info, 'hard',
block_device_info)
def list_instance_uuids(self):
"""List VM instance UUIDs."""
return self._vmops.list_instances()
def list_instances(self):
"""List VM instances from the single compute node."""
return self._vmops.list_instances()
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
# TODO(PhilDay): Add support for timeout (clean shutdown)
return self._vmops.migrate_disk_and_power_off(context, instance,
dest, flavor)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize, powering back on the instance."""
self._vmops.finish_revert_migration(context, instance, network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Live migration of an instance to another host."""
self._vmops.live_migration(context, instance, dest,
post_method, recover_method,
block_migration)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration."""
self.destroy(context, instance, network_info, block_device_info)
def get_instance_disk_info(self, instance, block_device_info=None):
pass
def get_vnc_console(self, context, instance):
"""Return link to instance's VNC console using vCenter logic."""
# vCenter does not actually run the VNC service
# itself. You must talk to the VNC host underneath vCenter.
return self._vmops.get_vnc_console(instance)
def get_mks_console(self, context, instance):
return self._vmops.get_mks_console(instance)
def _get_vcenter_uuid(self):
"""Retrieves the vCenter UUID."""
about = self._session._call_method(nova_vim_util, 'get_about_info')
return about.instanceUuid
def _create_nodename(self, mo_id):
"""Return a nodename which uniquely describes a cluster.
The name will be of the form:
<mo id>.<vcenter uuid>
e.g.
domain-26.9d51f082-58a4-4449-beed-6fd205a5726b
"""
return '%s.%s' % (mo_id, self._vcenter_uuid)
def _get_available_resources(self, host_stats):
return {'vcpus': host_stats['vcpus'],
'memory_mb': host_stats['host_memory_total'],
'local_gb': host_stats['disk_total'],
'vcpus_used': 0,
'memory_mb_used': host_stats['host_memory_total'] -
host_stats['host_memory_free'],
'local_gb_used': host_stats['disk_used'],
'hypervisor_type': host_stats['hypervisor_type'],
'hypervisor_version': host_stats['hypervisor_version'],
'hypervisor_hostname': host_stats['hypervisor_hostname'],
# The VMWare driver manages multiple hosts, so there are
# likely many different CPU models in use. As such it is
# impossible to provide any meaningful info on the CPU
# model of the "host"
'cpu_info': None,
'supported_instances': jsonutils.dumps(
host_stats['supported_instances']),
'numa_topology': None,
}
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
:returns: dictionary describing resources
"""
host_stats = self._vc_state.get_host_stats(refresh=True)
stats_dict = self._get_available_resources(host_stats)
return stats_dict
def get_available_nodes(self, refresh=False):
"""Returns nodenames of all nodes managed by the compute service.
This driver supports only one compute node.
"""
return [self._nodename]
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info, instance)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info, instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
return self._volumeops.get_volume_connector(instance)
def get_host_ip_addr(self):
"""Returns the IP address of the vCenter host."""
return CONF.vmware.host_ip
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info, reboot_type)
def _detach_instance_volumes(self, instance, block_device_info):
# We need to detach attached volumes
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if block_device_mapping:
# Certain disk types, for example 'IDE' do not support hot
# plugging. Hence we need to power off the instance and update
# the instance state.
self._vmops.power_off(instance)
# TODO(garyk): update the volumeops to read the state form the
# VM instead of relying on a instance flag
instance.vm_state = vm_states.STOPPED
for disk in block_device_mapping:
connection_info = disk['connection_info']
try:
self.detach_volume(connection_info, instance,
disk.get('device_name'))
except exception.StorageError:
# The volume does not exist
# NOTE(garyk): change to warning after string freeze
LOG.debug('%s does not exist!', disk.get('device_name'),
instance=instance)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to detach %(device_name)s. "
"Exception: %(exc)s"),
{'device_name': disk.get('device_name'),
'exc': e},
instance=instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
"""Destroy VM instance."""
# Destroy gets triggered when Resource Claim in resource_tracker
# is not successful. When resource claim is not successful,
# node is not set in instance. Perform destroy only if node is set
if not instance.node:
return
# A resize uses the same instance on the VC. We do not delete that
# VM in the event of a revert
if instance.task_state == task_states.RESIZE_REVERTING:
return
# We need to detach attached volumes
if block_device_info is not None:
try:
self._detach_instance_volumes(instance, block_device_info)
except vexc.ManagedObjectNotFoundException:
                LOG.warning(_LW('Instance does not exist. Proceeding to '
'delete instance properties on datastore'),
instance=instance)
self._vmops.destroy(instance, destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def suspend(self, context, instance):
"""Suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.rescue(context, instance, network_info, image_meta)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
# TODO(PhilDay): Add support for timeout (clean shutdown)
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def get_info(self, instance):
"""Return info about the VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_instance_diagnostics(instance)
def host_power_action(self, action):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def set_host_enabled(self, enabled):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def get_host_uptime(self):
"""Host uptime operation not supported by VC driver."""
msg = _("Multiple hosts may be managed by the VMWare "
"vCenter driver; therefore we do not return "
"uptime for just one host.")
raise NotImplementedError(msg)
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
self._vmops.inject_network_info(instance, nw_info)
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self._vmops.manage_image_cache(context, all_instances)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
return self._vmops.instance_exists(instance)
def attach_interface(self, instance, image_meta, vif):
"""Attach an interface to the instance."""
image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.attach_interface(instance, image_meta, vif)
def detach_interface(self, instance, vif):
"""Detach an interface from the instance."""
self._vmops.detach_interface(instance, vif)
class VMwareAPISession(api.VMwareAPISession):
"""Sets up a session with the VC/ESX host and handles all
the calls made to the host.
"""
def __init__(self, host_ip=CONF.vmware.host_ip,
host_port=CONF.vmware.host_port,
username=CONF.vmware.host_username,
password=CONF.vmware.host_password,
retry_count=CONF.vmware.api_retry_count,
scheme="https",
cacert=CONF.vmware.ca_file,
insecure=CONF.vmware.insecure):
super(VMwareAPISession, self).__init__(
host=host_ip,
port=host_port,
server_username=username,
server_password=password,
api_retry_count=retry_count,
task_poll_interval=CONF.vmware.task_poll_interval,
scheme=scheme,
create_session=True,
wsdl_loc=CONF.vmware.wsdl_location,
cacert=cacert,
insecure=insecure)
def _is_vim_object(self, module):
"""Check if the module is a VIM Object instance."""
return isinstance(module, vim.Vim)
def _call_method(self, module, method, *args, **kwargs):
"""Calls a method within the module specified with
args provided.
"""
if not self._is_vim_object(module):
return self.invoke_api(module, method, self.vim, *args, **kwargs)
else:
return self.invoke_api(module, method, *args, **kwargs)
def _wait_for_task(self, task_ref):
"""Return a Deferred that will give the result of the given task.
The task is polled until it completes.
"""
return self.wait_for_task(task_ref)
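# Illustrative sketch (not part of the driver): how _call_method dispatches.
# The helper-module example below (oslo.vmware's vim_util) is an assumption;
# only the dispatch logic itself comes from the code above.
# With a non-Vim helper module, the session's Vim object is injected as the
# first API argument:
#     session._call_method(vim_util, "get_object_property", vm_ref, "name")
#     -> session.invoke_api(vim_util, "get_object_property", session.vim, vm_ref, "name")
# When the Vim object itself is passed, it is invoked directly:
#     session._call_method(session.vim, "RebootGuest", vm_ref)
#     -> session.invoke_api(session.vim, "RebootGuest", vm_ref)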
|
|
import datetime
from dojo.importers import utils as importer_utils
from dojo.models import Test, Finding, \
Test_Type, \
BurpRawRequestResponse, \
Endpoint_Status, \
Test_Import
from dojo.endpoint.utils import endpoint_get_or_create
from dojo.utils import get_current_user, max_safe
from django.core.exceptions import MultipleObjectsReturned
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils import timezone
import dojo.notifications.helper as notifications_helper
import dojo.finding.helper as finding_helper
import dojo.jira_link.helper as jira_helper
import base64
import logging
logger = logging.getLogger(__name__)
deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication")
class DojoDefaultImporter(object):
def create_test(self, scan_type, engagement, lead, environment, tags=None,
scan_date=None, version=None, branch_tag=None, build_id=None, commit_hash=None, now=timezone.now()):
test_type, created = Test_Type.objects.get_or_create(
name=scan_type)
if created:
logger.info('Created new Test_Type with name %s because a report is being imported', test_type.name)
test = Test(
engagement=engagement,
lead=lead,
test_type=test_type,
target_start=scan_date if scan_date else now.date(),
target_end=scan_date if scan_date else now.date(),
environment=environment,
percent_complete=100,
version=version,
branch_tag=branch_tag,
build_id=build_id,
commit_hash=commit_hash,
tags=tags)
try:
# TODO What is going on here?
test.full_clean()
except ValidationError:
pass
test.save()
return test
def process_parsed_findings(self, test, parsed_findings, scan_type, user, active, verified, minimum_severity=None,
endpoints_to_add=None, push_to_jira=None, group_by=None, now=timezone.now()):
logger.debug('endpoints_to_add: %s', endpoints_to_add)
new_findings = []
items = parsed_findings
logger.debug('starting import of %i items.', len(items) if items else 0)
i = 0
for item in items:
sev = item.severity
if sev == 'Information' or sev == 'Informational':
sev = 'Info'
item.severity = sev
item.numerical_severity = Finding.get_numerical_severity(sev)
if minimum_severity and (Finding.SEVERITIES[sev] >
Finding.SEVERITIES[minimum_severity]):
continue
item.test = test
item.reporter = user if user else get_current_user()
item.last_reviewed = now
item.last_reviewed_by = user if user else get_current_user()
# Only override the active/verified flags if the parser left them at the default value (True)
if item.active:
item.active = active
if item.verified:
item.verified = verified
item.created = now
item.updated = now
item.save(dedupe_option=False)
if settings.FEATURE_FINDING_GROUPS and group_by:
finding_helper.add_finding_to_auto_group(item, group_by)
if (hasattr(item, 'unsaved_req_resp') and
len(item.unsaved_req_resp) > 0):
for req_resp in item.unsaved_req_resp:
burp_rr = BurpRawRequestResponse(
finding=item,
burpRequestBase64=base64.b64encode(req_resp["req"].encode("utf-8")),
burpResponseBase64=base64.b64encode(req_resp["resp"].encode("utf-8")))
burp_rr.clean()
burp_rr.save()
if (item.unsaved_request is not None and
item.unsaved_response is not None):
burp_rr = BurpRawRequestResponse(
finding=item,
burpRequestBase64=base64.b64encode(item.unsaved_request.encode()),
burpResponseBase64=base64.b64encode(item.unsaved_response.encode()))
burp_rr.clean()
burp_rr.save()
for endpoint in item.unsaved_endpoints:
try:
endpoint.clean()
except ValidationError as e:
logger.warning("DefectDojo is storing broken endpoint because cleaning wasn't successful: "
"{}".format(e))
try:
ep, created = endpoint_get_or_create(
protocol=endpoint.protocol,
userinfo=endpoint.userinfo,
host=endpoint.host,
port=endpoint.port,
path=endpoint.path,
query=endpoint.query,
fragment=endpoint.fragment,
product=test.engagement.product)
except (MultipleObjectsReturned):
pass
try:
eps, created = Endpoint_Status.objects.get_or_create(
finding=item,
endpoint=ep)
except (MultipleObjectsReturned):
pass
ep.endpoint_status.add(eps)
item.endpoint_status.add(eps)
item.endpoints.add(ep)
if endpoints_to_add:
for endpoint in endpoints_to_add:
logger.debug('adding endpoint %s', endpoint)
# TODO Not sure what happens here, we get an endpoint model and try to create it again?
try:
endpoint.clean()
except ValidationError as e:
logger.warning("DefectDojo is storing broken endpoint because cleaning wasn't successful: "
"{}".format(e))
try:
ep, created = endpoint_get_or_create(
protocol=endpoint.protocol,
userinfo=endpoint.userinfo,
host=endpoint.host,
port=endpoint.port,
path=endpoint.path,
query=endpoint.query,
fragment=endpoint.fragment,
product=test.engagement.product)
except (MultipleObjectsReturned):
pass
try:
eps, created = Endpoint_Status.objects.get_or_create(
finding=item,
endpoint=ep)
except (MultipleObjectsReturned):
pass
ep.endpoint_status.add(eps)
item.endpoints.add(ep)
item.endpoint_status.add(eps)
if item.unsaved_tags:
item.tags = item.unsaved_tags
new_findings.append(item)
# to avoid pushing a finding group multiple times, we push those outside of the loop
if settings.FEATURE_FINDING_GROUPS and item.finding_group:
item.save()
else:
item.save(push_to_jira=push_to_jira)
if settings.FEATURE_FINDING_GROUPS and push_to_jira:
for finding_group in set([finding.finding_group for finding in new_findings if finding.finding_group is not None]):
jira_helper.push_to_jira(finding_group)
return new_findings
def close_old_findings(self, test, scan_date_time, user, push_to_jira=None):
old_findings = []
# Close old active findings that are not reported by this scan.
new_hash_codes = test.finding_set.values('hash_code')
# TODO I don't think these criteria are 100% correct, why are findings with the same hash_code excluded?
# Would it make more sense to exclude duplicates? But the deduplication process can be unfinished because it's
# run in a celery async task...
if test.engagement.deduplication_on_engagement:
old_findings = Finding.objects.exclude(test=test) \
.exclude(hash_code__in=new_hash_codes) \
.filter(test__engagement=test.engagement,
test__test_type=test.test_type,
active=True)
else:
# TODO BUG? this will violate the deduplication_on_engagement setting for other engagements
old_findings = Finding.objects.exclude(test=test) \
.exclude(hash_code__in=new_hash_codes) \
.filter(test__engagement__product=test.engagement.product,
test__test_type=test.test_type,
active=True)
for old_finding in old_findings:
old_finding.active = False
old_finding.is_mitigated = True
old_finding.mitigated = scan_date_time
old_finding.notes.create(author=user,
entry="This finding has been automatically closed"
" as it is not present anymore in recent scans.")
endpoint_status = old_finding.endpoint_status.all()
for status in endpoint_status:
status.mitigated_by = user
status.mitigated_time = timezone.now()
status.mitigated = True
status.last_modified = timezone.now()
status.save()
old_finding.tags.add('stale')
# to avoid pushing a finding group multiple times, we push those outside of the loop
if settings.FEATURE_FINDING_GROUPS and old_finding.finding_group:
# don't try to dedupe findings that we are closing
old_finding.save(dedupe_option=False)
else:
old_finding.save(dedupe_option=False, push_to_jira=push_to_jira)
if settings.FEATURE_FINDING_GROUPS and push_to_jira:
for finding_group in set([finding.finding_group for finding in old_findings if finding.finding_group is not None]):
jira_helper.push_to_jira(finding_group)
return old_findings
def update_timestamps(self, test, scan_date, version, branch_tag, build_id, commit_hash, now, scan_date_time):
test.engagement.updated = now
if test.engagement.engagement_type == 'CI/CD':
test.engagement.target_end = max_safe([scan_date, test.engagement.target_end])
test.updated = now
test.target_end = max_safe([scan_date_time, test.target_end])
if version:
test.version = version
if branch_tag:
test.branch_tag = branch_tag
test.engagement.version = version
if build_id:
test.build_id = build_id
if commit_hash:
test.commit_hash = commit_hash
test.save()
test.engagement.save()
def import_scan(self, scan, scan_type, engagement, lead, environment, active, verified, tags=None, minimum_severity=None,
user=None, endpoints_to_add=None, scan_date=None, version=None, branch_tag=None, build_id=None,
commit_hash=None, push_to_jira=None, close_old_findings=False, group_by=None):
logger.debug(f'IMPORT_SCAN: parameters: {locals()}')
user = user or get_current_user()
now = timezone.now()
# retain weird existing logic to use current time for provided scan date
scan_date_time = datetime.datetime.combine(scan_date, timezone.now().time())
if settings.USE_TZ:
scan_date_time = timezone.make_aware(scan_date_time, timezone.get_default_timezone())
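# Example (illustrative): with scan_date == date(2021, 5, 1) and an import run
# at 14:30 UTC, scan_date_time becomes 2021-05-01 14:30, made timezone-aware
# when settings.USE_TZ is enabled.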
logger.debug('IMPORT_SCAN: Create Test')
test = self.create_test(scan_type, engagement, lead, environment, scan_date=scan_date, tags=tags,
version=version, branch_tag=branch_tag, build_id=build_id, commit_hash=commit_hash, now=now)
logger.debug('IMPORT_SCAN: Parse findings')
parsed_findings = importer_utils.parse_findings(scan, test, active, verified, scan_type)
logger.debug('IMPORT_SCAN: Processing findings')
new_findings = self.process_parsed_findings(test, parsed_findings, scan_type, user, active,
verified, minimum_severity=minimum_severity,
endpoints_to_add=endpoints_to_add, push_to_jira=push_to_jira,
group_by=group_by, now=now)
closed_findings = []
if close_old_findings:
logger.debug('IMPORT_SCAN: Closing findings no longer present in scan report')
closed_findings = self.close_old_findings(test, scan_date_time, user=user, push_to_jira=push_to_jira)
logger.debug('IMPORT_SCAN: Updating test/engagement timestamps')
importer_utils.update_timestamps(test, scan_date, version, branch_tag, build_id, commit_hash, now, scan_date_time)
if settings.TRACK_IMPORT_HISTORY:
logger.debug('IMPORT_SCAN: Updating Import History')
importer_utils.update_import_history(Test_Import.IMPORT_TYPE, active, verified, tags, minimum_severity,
endpoints_to_add, version, branch_tag, build_id, commit_hash,
push_to_jira, close_old_findings, test, new_findings, closed_findings)
logger.debug('IMPORT_SCAN: Generating notifications')
notifications_helper.notify_test_created(test)
updated_count = len(new_findings) + len(closed_findings)
if updated_count > 0:
notifications_helper.notify_scan_added(test, updated_count, new_findings=new_findings, findings_mitigated=closed_findings)
logger.debug('IMPORT_SCAN: Done')
return test, len(new_findings), len(closed_findings)
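# Illustrative usage sketch (hypothetical objects such as uploaded_file,
# engagement, environment and request.user; not part of this module):
#     importer = DojoDefaultImporter()
#     test, new_count, closed_count = importer.import_scan(
#         scan=uploaded_file, scan_type='ZAP Scan', engagement=engagement,
#         lead=request.user, environment=environment, active=True, verified=False,
#         scan_date=timezone.now().date(), close_old_findings=True)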
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._source_control_configuration_client_enums import *
class ComplianceStatus(msrest.serialization.Model):
"""Compliance Status details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar compliance_state: The compliance state of the configuration. Possible values include:
"Pending", "Compliant", "Noncompliant", "Installed", "Failed".
:vartype compliance_state: str or
~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.ComplianceStateType
:param last_config_applied: Datetime the configuration was last applied.
:type last_config_applied: ~datetime.datetime
:param message: Message from when the configuration was applied.
:type message: str
:param message_level: Level of the message. Possible values include: "Error", "Warning",
"Information".
:type message_level: str or
~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.MessageLevelType
"""
_validation = {
'compliance_state': {'readonly': True},
}
_attribute_map = {
'compliance_state': {'key': 'complianceState', 'type': 'str'},
'last_config_applied': {'key': 'lastConfigApplied', 'type': 'iso-8601'},
'message': {'key': 'message', 'type': 'str'},
'message_level': {'key': 'messageLevel', 'type': 'str'},
}
def __init__(
self,
*,
last_config_applied: Optional[datetime.datetime] = None,
message: Optional[str] = None,
message_level: Optional[Union[str, "MessageLevelType"]] = None,
**kwargs
):
super(ComplianceStatus, self).__init__(**kwargs)
self.compliance_state = None
self.last_config_applied = last_config_applied
self.message = message
self.message_level = message_level
class ErrorDefinition(msrest.serialization.Model):
"""Error definition.
All required parameters must be populated in order to send to Azure.
:param code: Required. Service specific error code which serves as the substatus for the HTTP
error code.
:type code: str
:param message: Required. Description of the error.
:type message: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: str,
message: str,
**kwargs
):
super(ErrorDefinition, self).__init__(**kwargs)
self.code = code
self.message = message
class ErrorResponse(msrest.serialization.Model):
"""Error response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error: Error definition.
:vartype error: ~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.ErrorDefinition
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDefinition'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = None
class HelmOperatorProperties(msrest.serialization.Model):
"""Properties for Helm operator.
:param chart_version: Version of the operator Helm chart.
:type chart_version: str
:param chart_values: Values override for the operator Helm chart.
:type chart_values: str
"""
_attribute_map = {
'chart_version': {'key': 'chartVersion', 'type': 'str'},
'chart_values': {'key': 'chartValues', 'type': 'str'},
}
def __init__(
self,
*,
chart_version: Optional[str] = None,
chart_values: Optional[str] = None,
**kwargs
):
super(HelmOperatorProperties, self).__init__(**kwargs)
self.chart_version = chart_version
self.chart_values = chart_values
class Resource(msrest.serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param system_data: Top level metadata
https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources.
:type system_data: ~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
*,
system_data: Optional["SystemData"] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = system_data
class ProxyResource(Resource):
"""ARM proxy resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param system_data: Top level metadata
https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources.
:type system_data: ~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
*,
system_data: Optional["SystemData"] = None,
**kwargs
):
super(ProxyResource, self).__init__(system_data=system_data, **kwargs)
class ResourceProviderOperation(msrest.serialization.Model):
"""Supported operation of this resource provider.
Variables are only populated by the server, and will be ignored when sending a request.
:param name: Operation name, in format of {provider}/{resource}/{operation}.
:type name: str
:param display: Display metadata associated with the operation.
:type display:
~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.ResourceProviderOperationDisplay
:ivar is_data_action: The flag that indicates whether the operation applies to data plane.
:vartype is_data_action: bool
"""
_validation = {
'is_data_action': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'ResourceProviderOperationDisplay'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["ResourceProviderOperationDisplay"] = None,
**kwargs
):
super(ResourceProviderOperation, self).__init__(**kwargs)
self.name = name
self.display = display
self.is_data_action = None
class ResourceProviderOperationDisplay(msrest.serialization.Model):
"""Display metadata associated with the operation.
:param provider: Resource provider: Microsoft KubernetesConfiguration.
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
:param operation: Type of operation: get, read, delete, etc.
:type operation: str
:param description: Description of this operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(ResourceProviderOperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class ResourceProviderOperationList(msrest.serialization.Model):
"""Result of the request to list operations.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: List of operations supported by this resource provider.
:type value:
list[~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.ResourceProviderOperation]
:ivar next_link: URL to the next set of results, if any.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceProviderOperation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ResourceProviderOperation"]] = None,
**kwargs
):
super(ResourceProviderOperationList, self).__init__(**kwargs)
self.value = value
self.next_link = None
class Result(msrest.serialization.Model):
"""Sample result definition.
:param sample_property: Sample property of type string.
:type sample_property: str
"""
_attribute_map = {
'sample_property': {'key': 'sampleProperty', 'type': 'str'},
}
def __init__(
self,
*,
sample_property: Optional[str] = None,
**kwargs
):
super(Result, self).__init__(**kwargs)
self.sample_property = sample_property
class SourceControlConfiguration(ProxyResource):
"""The SourceControl Configuration object returned in Get & Put response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param system_data: Top level metadata
https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources.
:type system_data: ~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.SystemData
:param repository_url: Url of the SourceControl Repository.
:type repository_url: str
:param operator_namespace: The namespace to which this operator is installed. Maximum of 253
lower case alphanumeric characters, hyphen and period only.
:type operator_namespace: str
:param operator_instance_name: Instance name of the operator - identifying the specific
configuration.
:type operator_instance_name: str
:param operator_type: Type of the operator. Possible values include: "Flux".
:type operator_type: str or
~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.OperatorType
:param operator_params: Any Parameters for the Operator instance in string format.
:type operator_params: str
:param configuration_protected_settings: Name-value pairs of protected configuration settings
for the configuration.
:type configuration_protected_settings: dict[str, str]
:param operator_scope: Scope at which the operator will be installed. Possible values include:
"cluster", "namespace". Default value: "cluster".
:type operator_scope: str or
~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.OperatorScopeType
:ivar repository_public_key: Public Key associated with this SourceControl configuration
(either generated within the cluster or provided by the user).
:vartype repository_public_key: str
:param ssh_known_hosts_contents: Base64-encoded known_hosts contents containing public SSH keys
required to access private Git instances.
:type ssh_known_hosts_contents: str
:param enable_helm_operator: Option to enable Helm Operator for this git configuration.
:type enable_helm_operator: bool
:param helm_operator_properties: Properties for Helm operator.
:type helm_operator_properties:
~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.HelmOperatorProperties
:ivar provisioning_state: The provisioning state of the resource provider. Possible values
include: "Accepted", "Deleting", "Running", "Succeeded", "Failed".
:vartype provisioning_state: str or
~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.ProvisioningStateType
:ivar compliance_status: Compliance Status of the Configuration.
:vartype compliance_status:
~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.ComplianceStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'repository_public_key': {'readonly': True},
'provisioning_state': {'readonly': True},
'compliance_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'repository_url': {'key': 'properties.repositoryUrl', 'type': 'str'},
'operator_namespace': {'key': 'properties.operatorNamespace', 'type': 'str'},
'operator_instance_name': {'key': 'properties.operatorInstanceName', 'type': 'str'},
'operator_type': {'key': 'properties.operatorType', 'type': 'str'},
'operator_params': {'key': 'properties.operatorParams', 'type': 'str'},
'configuration_protected_settings': {'key': 'properties.configurationProtectedSettings', 'type': '{str}'},
'operator_scope': {'key': 'properties.operatorScope', 'type': 'str'},
'repository_public_key': {'key': 'properties.repositoryPublicKey', 'type': 'str'},
'ssh_known_hosts_contents': {'key': 'properties.sshKnownHostsContents', 'type': 'str'},
'enable_helm_operator': {'key': 'properties.enableHelmOperator', 'type': 'bool'},
'helm_operator_properties': {'key': 'properties.helmOperatorProperties', 'type': 'HelmOperatorProperties'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'compliance_status': {'key': 'properties.complianceStatus', 'type': 'ComplianceStatus'},
}
def __init__(
self,
*,
system_data: Optional["SystemData"] = None,
repository_url: Optional[str] = None,
operator_namespace: Optional[str] = "default",
operator_instance_name: Optional[str] = None,
operator_type: Optional[Union[str, "OperatorType"]] = None,
operator_params: Optional[str] = None,
configuration_protected_settings: Optional[Dict[str, str]] = None,
operator_scope: Optional[Union[str, "OperatorScopeType"]] = "cluster",
ssh_known_hosts_contents: Optional[str] = None,
enable_helm_operator: Optional[bool] = None,
helm_operator_properties: Optional["HelmOperatorProperties"] = None,
**kwargs
):
super(SourceControlConfiguration, self).__init__(system_data=system_data, **kwargs)
self.repository_url = repository_url
self.operator_namespace = operator_namespace
self.operator_instance_name = operator_instance_name
self.operator_type = operator_type
self.operator_params = operator_params
self.configuration_protected_settings = configuration_protected_settings
self.operator_scope = operator_scope
self.repository_public_key = None
self.ssh_known_hosts_contents = ssh_known_hosts_contents
self.enable_helm_operator = enable_helm_operator
self.helm_operator_properties = helm_operator_properties
self.provisioning_state = None
self.compliance_status = None
class SourceControlConfigurationList(msrest.serialization.Model):
"""Result of the request to list Source Control Configurations. It contains a list of SourceControlConfiguration objects and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of Source Control Configurations within a Kubernetes cluster.
:vartype value:
list[~azure.mgmt.kubernetesconfiguration.v2020_10_01_preview.models.SourceControlConfiguration]
:ivar next_link: URL to get the next set of configuration objects, if any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SourceControlConfiguration]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SourceControlConfigurationList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SystemData(msrest.serialization.Model):
"""Top level metadata https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar created_by: A string identifier for the identity that created the resource.
:vartype created_by: str
:ivar created_by_type: The type of identity that created the resource: user, application,
managedIdentity, key.
:vartype created_by_type: str
:ivar created_at: The timestamp of resource creation (UTC).
:vartype created_at: ~datetime.datetime
:ivar last_modified_by: A string identifier for the identity that last modified the resource.
:vartype last_modified_by: str
:ivar last_modified_by_type: The type of identity that last modified the resource: user,
application, managedIdentity, key.
:vartype last_modified_by_type: str
:ivar last_modified_at: The timestamp of resource last modification (UTC).
:vartype last_modified_at: ~datetime.datetime
"""
_validation = {
'created_by': {'readonly': True},
'created_by_type': {'readonly': True},
'created_at': {'readonly': True},
'last_modified_by': {'readonly': True},
'last_modified_by_type': {'readonly': True},
'last_modified_at': {'readonly': True},
}
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = None
self.created_by_type = None
self.created_at = None
self.last_modified_by = None
self.last_modified_by_type = None
self.last_modified_at = None
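# Illustrative sketch (not part of the generated SDK): constructing a
# SourceControlConfiguration locally with the models defined above; the
# values are hypothetical.
#     config = SourceControlConfiguration(
#         repository_url="https://github.com/example/repo",
#         operator_namespace="flux",
#         operator_instance_name="cluster-config",
#         operator_scope="cluster",
#         enable_helm_operator=True,
#         helm_operator_properties=HelmOperatorProperties(chart_version="1.2.0"),
#     )
# Read-only fields such as provisioning_state and compliance_status are
# populated by the service and remain None on a locally constructed model.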
|
|
import os
import re
import struct
import sys
import textwrap
sys.path.insert(0, os.path.dirname(__file__))
import ufunc_docstrings as docstrings
sys.path.pop(0)
Zero = "PyUFunc_Zero"
One = "PyUFunc_One"
None_ = "PyUFunc_None"
# Sentinel value to specify that the loop for the given TypeDescription uses the
# pointer to arrays as its func_data.
UsesArraysAsData = object()
class TypeDescription(object):
"""Type signature for a ufunc.
Attributes
----------
type : str
Character representing the nominal type.
func_data : str or None or UsesArraysAsData, optional
The string representing the expression to insert into the data array, if
any.
in_ : str or None, optional
The typecode(s) of the inputs.
out : str or None, optional
The typecode(s) of the outputs.
"""
def __init__(self, type, f=None, in_=None, out=None):
self.type = type
self.func_data = f
if in_ is not None:
in_ = in_.replace('P', type)
self.in_ = in_
if out is not None:
out = out.replace('P', type)
self.out = out
def finish_signature(self, nin, nout):
if self.in_ is None:
self.in_ = self.type * nin
assert len(self.in_) == nin
if self.out is None:
self.out = self.type * nout
assert len(self.out) == nout
_fdata_map = dict(f='npy_%sf', d='npy_%s', g='npy_%sl',
F='nc_%sf', D='nc_%s', G='nc_%sl')
def build_func_data(types, f):
func_data = []
for t in types:
d = _fdata_map.get(t, '%s') % (f,)
func_data.append(d)
return func_data
def TD(types, f=None, in_=None, out=None):
if f is not None:
if isinstance(f, str):
func_data = build_func_data(types, f)
else:
assert len(f) == len(types)
func_data = f
else:
func_data = (None,) * len(types)
if isinstance(in_, str):
in_ = (in_,) * len(types)
elif in_ is None:
in_ = (None,) * len(types)
if isinstance(out, str):
out = (out,) * len(types)
elif out is None:
out = (None,) * len(types)
tds = []
for t, fd, i, o in zip(types, func_data, in_, out):
tds.append(TypeDescription(t, f=fd, in_=i, out=o))
return tds
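# For example (illustrative): TD('fdg', f='sqrt') expands to three
# TypeDescription objects whose func_data become 'npy_sqrtf', 'npy_sqrt' and
# 'npy_sqrtl' via _fdata_map; their in_/out signatures are filled in later by
# finish_signature().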
class Ufunc(object):
"""Description of a ufunc.
Attributes
----------
nin: number of input arguments
nout: number of output arguments
identity: identity element for a two-argument function
docstring: docstring for the ufunc
type_descriptions: list of TypeDescription objects
"""
def __init__(self, nin, nout, identity, docstring,
*type_descriptions):
self.nin = nin
self.nout = nout
if identity is None:
identity = None_
self.identity = identity
self.docstring = docstring
self.type_descriptions = []
for td in type_descriptions:
self.type_descriptions.extend(td)
for td in self.type_descriptions:
td.finish_signature(self.nin, self.nout)
# String-handling utilities to avoid locale-dependence.
import string
UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase)
def english_upper(s):
""" Apply English case rules to convert ASCII strings to all upper case.
This is an internal utility function to replace calls to str.upper() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
uppered : str
Examples
--------
>>> from numpy.lib.utils import english_upper
>>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
>>> english_upper('')
''
"""
uppered = s.translate(UPPER_TABLE)
return uppered
#each entry in defdict is a Ufunc object.
#name: [string of chars for which it is defined,
# string of characters using func interface,
# tuple of strings giving funcs for data,
# (in, out), or (instr, outstr) giving the signature as character codes,
# identity,
# docstring,
# output specification (optional)
# ]
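# For example, the 'sqrt' entry defined below,
#     Ufunc(1, 1, None, docstrings.get('numpy.core.umath.sqrt'),
#           TD(inexact, f='sqrt'), TD(P, f='sqrt')),
# describes a one-input/one-output ufunc whose float and complex loops call the
# C sqrt family and whose 'P' loop calls the object's sqrt() method.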
chartoname = {'?': 'bool',
'b': 'byte',
'B': 'ubyte',
'h': 'short',
'H': 'ushort',
'i': 'int',
'I': 'uint',
'l': 'long',
'L': 'ulong',
'q': 'longlong',
'Q': 'ulonglong',
'f': 'float',
'd': 'double',
'g': 'longdouble',
'F': 'cfloat',
'D': 'cdouble',
'G': 'clongdouble',
'M': 'datetime',
'm': 'timedelta',
'O': 'OBJECT',
# '.' is like 'O', but calls a method of the object instead
# of a function
'P': 'OBJECT',
}
all = '?bBhHiIlLqQfdgFDGOMm'
O = 'O'
P = 'P'
ints = 'bBhHiIlLqQ'
times = 'Mm'
intsO = ints + O
bints = '?' + ints
bintsO = bints + O
flts = 'fdg'
fltsO = flts + O
fltsP = flts + P
cmplx = 'FDG'
cmplxO = cmplx + O
cmplxP = cmplx + P
inexact = flts + cmplx
noint = inexact+O
nointP = inexact+P
allP = bints+times+flts+cmplxP
nobool = all[1:]
noobj = all[:-3]+all[-2:]
nobool_or_obj = all[1:-3]+all[-2:]
intflt = ints+flts
intfltcmplx = ints+flts+cmplx
nocmplx = bints+times+flts
nocmplxO = nocmplx+O
nocmplxP = nocmplx+P
notimes_or_obj = bints + inexact
# Find which code corresponds to int64.
int64 = ''
uint64 = ''
for code in 'bhilq':
if struct.calcsize(code) == 8:
int64 = code
uint64 = english_upper(code)
break
defdict = {
'add' :
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.add'),
TD(notimes_or_obj),
[TypeDescription('M', UsesArraysAsData, 'Mm', 'M'),
TypeDescription('m', UsesArraysAsData, 'mm', 'm'),
TypeDescription('M', UsesArraysAsData, 'mM', 'M'),
],
TD(O, f='PyNumber_Add'),
),
'subtract' :
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.subtract'),
TD(notimes_or_obj),
[TypeDescription('M', UsesArraysAsData, 'Mm', 'M'),
TypeDescription('m', UsesArraysAsData, 'mm', 'm'),
TypeDescription('M', UsesArraysAsData, 'MM', 'm'),
],
TD(O, f='PyNumber_Subtract'),
),
'multiply' :
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.multiply'),
TD(notimes_or_obj),
TD(O, f='PyNumber_Multiply'),
),
'divide' :
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.divide'),
TD(intfltcmplx),
TD(O, f='PyNumber_Divide'),
),
'floor_divide' :
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.floor_divide'),
TD(intfltcmplx),
TD(O, f='PyNumber_FloorDivide'),
),
'true_divide' :
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.true_divide'),
TD('bBhH', out='d'),
TD('iIlLqQ', out='d'),
TD(flts+cmplx),
TD(O, f='PyNumber_TrueDivide'),
),
'conjugate' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.conjugate'),
TD(ints+flts+cmplx),
TD(P, f='conjugate'),
),
'fmod' :
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.fmod'),
TD(ints),
TD(flts, f='fmod'),
TD(P, f='fmod'),
),
'square' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.square'),
TD(ints+inexact),
TD(O, f='Py_square'),
),
'reciprocal' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.reciprocal'),
TD(ints+inexact),
TD(O, f='Py_reciprocal'),
),
'ones_like' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.ones_like'),
TD(noobj),
TD(O, f='Py_get_one'),
),
'power' :
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.power'),
TD(ints),
TD(inexact, f='pow'),
TD(O, f='npy_ObjectPower'),
),
'absolute' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.absolute'),
TD(bints+flts+times),
TD(cmplx, out=('f', 'd', 'g')),
TD(O, f='PyNumber_Absolute'),
),
'negative' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.negative'),
TD(bints+flts+times),
TD(cmplx, f='neg'),
TD(O, f='PyNumber_Negative'),
),
'sign' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sign'),
TD(nobool),
),
'greater' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater'),
TD(all, out='?'),
),
'greater_equal' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater_equal'),
TD(all, out='?'),
),
'less' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less'),
TD(all, out='?'),
),
'less_equal' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less_equal'),
TD(all, out='?'),
),
'equal' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.equal'),
TD(all, out='?'),
),
'not_equal' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.not_equal'),
TD(all, out='?'),
),
'logical_and' :
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.logical_and'),
TD(noobj, out='?'),
TD(P, f='logical_and'),
),
'logical_not' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.logical_not'),
TD(noobj, out='?'),
TD(P, f='logical_not'),
),
'logical_or' :
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.logical_or'),
TD(noobj, out='?'),
TD(P, f='logical_or'),
),
'logical_xor' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.logical_xor'),
TD(noobj, out='?'),
TD(P, f='logical_xor'),
),
'maximum' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.maximum'),
TD(noobj),
TD(O, f='npy_ObjectMax')
),
'minimum' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.minimum'),
TD(noobj),
TD(O, f='npy_ObjectMin')
),
'fmax' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.fmax'),
TD(noobj),
TD(O, f='npy_ObjectMax')
),
'fmin' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.fmin'),
TD(noobj),
TD(O, f='npy_ObjectMin')
),
'logaddexp' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.logaddexp'),
TD(flts, f="logaddexp")
),
'logaddexp2' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.logaddexp2'),
TD(flts, f="logaddexp2")
),
# FIXME: decide if the times should have the bitwise operations.
'bitwise_and' :
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.bitwise_and'),
TD(bints),
TD(O, f='PyNumber_And'),
),
'bitwise_or' :
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.bitwise_or'),
TD(bints),
TD(O, f='PyNumber_Or'),
),
'bitwise_xor' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.bitwise_xor'),
TD(bints),
TD(O, f='PyNumber_Xor'),
),
'invert' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.invert'),
TD(bints),
TD(O, f='PyNumber_Invert'),
),
'left_shift' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.left_shift'),
TD(ints),
TD(O, f='PyNumber_Lshift'),
),
'right_shift' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.right_shift'),
TD(ints),
TD(O, f='PyNumber_Rshift'),
),
'degrees' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.degrees'),
TD(fltsP, f='degrees'),
),
'rad2deg' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rad2deg'),
TD(fltsP, f='rad2deg'),
),
'radians' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.radians'),
TD(fltsP, f='radians'),
),
'deg2rad' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.deg2rad'),
TD(fltsP, f='deg2rad'),
),
'arccos' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arccos'),
TD(inexact, f='acos'),
TD(P, f='arccos'),
),
'arccosh' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arccosh'),
TD(inexact, f='acosh'),
TD(P, f='arccosh'),
),
'arcsin' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arcsin'),
TD(inexact, f='asin'),
TD(P, f='arcsin'),
),
'arcsinh' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arcsinh'),
TD(inexact, f='asinh'),
TD(P, f='arcsinh'),
),
'arctan' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arctan'),
TD(inexact, f='atan'),
TD(P, f='arctan'),
),
'arctanh' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arctanh'),
TD(inexact, f='atanh'),
TD(P, f='arctanh'),
),
'cos' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cos'),
TD(inexact, f='cos'),
TD(P, f='cos'),
),
'sin' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sin'),
TD(inexact, f='sin'),
TD(P, f='sin'),
),
'tan' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.tan'),
TD(inexact, f='tan'),
TD(P, f='tan'),
),
'cosh' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cosh'),
TD(inexact, f='cosh'),
TD(P, f='cosh'),
),
'sinh' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sinh'),
TD(inexact, f='sinh'),
TD(P, f='sinh'),
),
'tanh' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.tanh'),
TD(inexact, f='tanh'),
TD(P, f='tanh'),
),
'exp' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp'),
TD(inexact, f='exp'),
TD(P, f='exp'),
),
'exp2' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp2'),
TD(inexact, f='exp2'),
TD(P, f='exp2'),
),
'expm1' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.expm1'),
TD(inexact, f='expm1'),
TD(P, f='expm1'),
),
'log' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log'),
TD(inexact, f='log'),
TD(P, f='log'),
),
'log2' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log2'),
TD(inexact, f='log2'),
TD(P, f='log2'),
),
'log10' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log10'),
TD(inexact, f='log10'),
TD(P, f='log10'),
),
'log1p' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log1p'),
TD(inexact, f='log1p'),
TD(P, f='log1p'),
),
'sqrt' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sqrt'),
TD(inexact, f='sqrt'),
TD(P, f='sqrt'),
),
'ceil' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.ceil'),
TD(flts, f='ceil'),
TD(P, f='ceil'),
),
'trunc' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.trunc'),
TD(flts, f='trunc'),
TD(P, f='trunc'),
),
'fabs' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.fabs'),
TD(flts, f='fabs'),
TD(P, f='fabs'),
),
'floor' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.floor'),
TD(flts, f='floor'),
TD(P, f='floor'),
),
'rint' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rint'),
TD(inexact, f='rint'),
TD(P, f='rint'),
),
'arctan2' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.arctan2'),
TD(flts, f='atan2'),
TD(P, f='arctan2'),
),
'remainder' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.remainder'),
TD(intflt),
TD(O, f='PyNumber_Remainder'),
),
'hypot' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.hypot'),
TD(flts, f='hypot'),
TD(P, f='hypot'),
),
'isnan' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isnan'),
TD(inexact, out='?'),
),
'isinf' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isinf'),
TD(inexact, out='?'),
),
'isfinite' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isfinite'),
TD(inexact, out='?'),
),
'signbit' :
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.signbit'),
TD(flts, out='?'),
),
'copysign' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.copysign'),
TD(flts),
),
'modf' :
Ufunc(1, 2, None,
docstrings.get('numpy.core.umath.modf'),
TD(flts),
),
}
def indent(st,spaces):
indention = ' '*spaces
indented = indention + st.replace('\n','\n'+indention)
# trim off any trailing spaces
indented = re.sub(r' +$',r'',indented)
return indented
chartotype1 = {'f': 'f_f',
'd': 'd_d',
'g': 'g_g',
'F': 'F_F',
'D': 'D_D',
'G': 'G_G',
'O': 'O_O',
'P': 'O_O_method'}
chartotype2 = {'f': 'ff_f',
'd': 'dd_d',
'g': 'gg_g',
'F': 'FF_F',
'D': 'DD_D',
'G': 'GG_G',
'O': 'OO_O',
'P': 'OO_O_method'}
#for each name
# 1) create functions, data, and signature
# 2) fill in functions and data in InitOperators
# 3) add function.
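# For a one-argument ufunc like 'degrees' (TD(fltsP, f='degrees')), the
# generated arrays look roughly like (illustrative, abbreviated):
#     static PyUFuncGenericFunction degrees_functions[] = { NULL, NULL, NULL, NULL };
#     static void * degrees_data[] = { (void *)NULL, (void *)NULL, (void *)NULL, (void *)"degrees" };
#     static char degrees_signatures[] = { PyArray_FLOAT, PyArray_FLOAT, ... };
# with InitOperators later filling in degrees_functions[i] = PyUFunc_f_f; and
# degrees_data[i] = (void *) npy_degreesf; for the numeric loops.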
def make_arrays(funcdict):
# The functions array contains an entry for every type implemented. NULL
# should be placed where a PyUFunc_* style function will be filled in later.
#
code1list = []
code2list = []
names = funcdict.keys()
names.sort()
for name in names:
uf = funcdict[name]
funclist = []
datalist = []
siglist = []
k = 0
sub = 0
if uf.nin > 1:
assert uf.nin == 2
thedict = chartotype2 # two inputs and one output
else:
thedict = chartotype1 # one input and one output
for t in uf.type_descriptions:
if t.func_data not in (None, UsesArraysAsData):
funclist.append('NULL')
astr = '%s_functions[%d] = PyUFunc_%s;' % \
(name, k, thedict[t.type])
code2list.append(astr)
if t.type == 'O':
astr = '%s_data[%d] = (void *) %s;' % \
(name, k, t.func_data)
code2list.append(astr)
datalist.append('(void *)NULL')
elif t.type == 'P':
datalist.append('(void *)"%s"' % t.func_data)
else:
astr = '%s_data[%d] = (void *) %s;' % \
(name, k, t.func_data)
code2list.append(astr)
datalist.append('(void *)NULL')
#datalist.append('(void *)%s' % t.func_data)
sub += 1
elif t.func_data is UsesArraysAsData:
tname = english_upper(chartoname[t.type])
datalist.append('(void *)NULL')
funclist.append('%s_%s_%s_%s' % (tname, t.in_, t.out, name))
code2list.append('PyUFunc_SetUsesArraysAsData(%s_data, %s);' % (name, k))
else:
datalist.append('(void *)NULL')
tname = english_upper(chartoname[t.type])
funclist.append('%s_%s' % (tname, name))
for x in t.in_ + t.out:
siglist.append('PyArray_%s' % (english_upper(chartoname[x]),))
k += 1
funcnames = ', '.join(funclist)
signames = ', '.join(siglist)
datanames = ', '.join(datalist)
code1list.append("static PyUFuncGenericFunction %s_functions[] = { %s };" \
% (name, funcnames))
code1list.append("static void * %s_data[] = { %s };" \
% (name, datanames))
code1list.append("static char %s_signatures[] = { %s };" \
% (name, signames))
return "\n".join(code1list),"\n".join(code2list)
def make_ufuncs(funcdict):
code3list = []
names = funcdict.keys()
names.sort()
for name in names:
uf = funcdict[name]
mlist = []
docstring = textwrap.dedent(uf.docstring).strip()
docstring = docstring.encode('string-escape').replace(r'"', r'\"')
# Split the docstring because some compilers (like MS) do not like big
# string literals in C code. We split at newlines because textwrap.wrap
# does not play well with \n
docstring = '\\n\"\"'.join(docstring.split(r"\n"))
mlist.append(\
r"""f = PyUFunc_FromFuncAndData(%s_functions, %s_data, %s_signatures, %d,
%d, %d, %s, "%s",
"%s", 0);""" % (name, name, name,
len(uf.type_descriptions),
uf.nin, uf.nout,
uf.identity,
name, docstring))
mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name)
mlist.append(r"""Py_DECREF(f);""")
code3list.append('\n'.join(mlist))
return '\n'.join(code3list)
def make_code(funcdict,filename):
code1, code2 = make_arrays(funcdict)
code3 = make_ufuncs(funcdict)
code2 = indent(code2,4)
code3 = indent(code3,4)
code = r"""
/** Warning this file is autogenerated!!!
Please make changes to the code generator program (%s)
**/
%s
static void
InitOperators(PyObject *dictionary) {
PyObject *f;
%s
%s
}
""" % (filename, code1, code2, code3)
return code
if __name__ == "__main__":
filename = __file__
fid = open('__umath_generated.c','w')
code = make_code(defdict, filename)
fid.write(code)
fid.close()
|
|
from __future__ import with_statement
import os
import posixpath
import stat
import re
import uuid
from fnmatch import filter as fnfilter
from fabric.state import output, connections, env
from fabric.utils import warn
from fabric.context_managers import settings
# TODO: use self.sftp.listdir_iter on Paramiko 1.15+
def _format_local(local_path, local_is_path):
"""Format a path for log output"""
if local_is_path:
return local_path
else:
# This allows users to set a name attr on their StringIO objects
# just like an open file object would have
return getattr(local_path, 'name', '<file obj>')
class SFTP(object):
"""
SFTP helper class, which is also a facade for ssh.SFTPClient.
"""
def __init__(self, host_string):
self.ftp = connections[host_string].open_sftp()
# Recall that __getattr__ is the "fallback" attribute getter, and is thus
# pretty safe to use for facade-like behavior as we're doing here.
def __getattr__(self, attr):
return getattr(self.ftp, attr)
def isdir(self, path):
try:
return stat.S_ISDIR(self.ftp.stat(path).st_mode)
except IOError:
return False
def islink(self, path):
try:
return stat.S_ISLNK(self.ftp.lstat(path).st_mode)
except IOError:
return False
def exists(self, path):
try:
self.ftp.lstat(path).st_mode
except IOError:
return False
return True
def glob(self, path):
from fabric.state import win32
dirpart, pattern = os.path.split(path)
rlist = self.ftp.listdir(dirpart)
names = fnfilter([f for f in rlist if not f[0] == '.'], pattern)
ret = []
if len(names):
s = '/'
ret = [dirpart.rstrip(s) + s + name.lstrip(s) for name in names]
if not win32:
ret = [posixpath.join(dirpart, name) for name in names]
return ret
def walk(self, top, topdown=True, onerror=None, followlinks=False):
from os.path import join
# We may not have read permission for top, in which case we can't get a
# list of the files the directory contains. os.path.walk always
# suppressed the exception then, rather than blow up for a minor reason
# when (say) a thousand readable directories are still left to visit.
# That logic is copied here.
try:
# Unlike os.walk, we list the directory through the SFTP client and
# report failures via the optional onerror callback.
names = self.ftp.listdir(top)
except Exception, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if self.isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not self.islink(path):
for x in self.walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def mkdir(self, path, use_sudo):
from fabric.api import sudo, hide
if use_sudo:
with hide('everything'):
sudo('mkdir "%s"' % path)
else:
self.ftp.mkdir(path)
def get(self, remote_path, local_path, use_sudo, local_is_path, rremote=None, temp_dir=""):
from fabric.api import sudo, hide
# rremote => relative remote path, so get(/var/log) would result in
# this function being called with
# remote_path=/var/log/apache2/access.log and
# rremote=apache2/access.log
rremote = rremote if rremote is not None else remote_path
# Handle format string interpolation (e.g. %(dirname)s)
path_vars = {
'host': env.host_string.replace(':', '-'),
'basename': os.path.basename(rremote),
'dirname': os.path.dirname(rremote),
'path': rremote
}
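# Example (illustrative): for remote_path='/var/log/apache2/access.log'
# and rremote='apache2/access.log' on host 'web1:22', a local_path
# template of '%(host)s/%(dirname)s/%(basename)s' expands to
# 'web1-22/apache2/access.log'.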
if local_is_path:
# Fix for issue #711 and #1348 - escape %'s as well as possible.
format_re = r'(%%(?!\((?:%s)\)\w))' % '|'.join(path_vars.keys())
escaped_path = re.sub(format_re, r'%\1', local_path)
local_path = os.path.abspath(escaped_path % path_vars)
# Ensure we give ssh.SFTPClient a file by prepending and/or
# creating local directories as appropriate.
dirpath, filepath = os.path.split(local_path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
if os.path.isdir(local_path):
local_path = os.path.join(local_path, path_vars['basename'])
if output.running:
print("[%s] download: %s <- %s" % (
env.host_string,
_format_local(local_path, local_is_path),
remote_path
))
# Warn about overwrites, but keep going
if local_is_path and os.path.exists(local_path):
msg = "Local file %s already exists and is being overwritten."
warn(msg % local_path)
# When using sudo, "bounce" the file through a guaranteed-unique file
# path in the default remote CWD (which, typically, the login user will
# have write permissions on) in order to sudo(cp) it.
if use_sudo:
target_path = posixpath.join(temp_dir, uuid.uuid4().hex)
# Temporarily nuke 'cwd' so sudo() doesn't "cd" its cp command.
# (The target path has already been cwd-ified elsewhere.)
with settings(hide('everything'), cwd=""):
sudo('cp -p "%s" "%s"' % (remote_path, target_path))
# The user should always own the copied file.
sudo('chown %s "%s"' % (env.user, target_path))
# Only root and the user have the right to read the file
sudo('chmod %o "%s"' % (0400, target_path))
remote_path = target_path
try:
# File-like objects: reset to file seek 0 (to ensure full overwrite)
# and then use Paramiko's getfo() directly
getter = self.ftp.get
if not local_is_path:
local_path.seek(0)
getter = self.ftp.getfo
getter(remote_path, local_path)
finally:
# try to remove the temporary file after the download
if use_sudo:
with settings(hide('everything'), cwd=""):
sudo('rm -f "%s"' % remote_path)
# Return local_path object for posterity. (If mutated, caller will want
# to know.)
return local_path
def get_dir(self, remote_path, local_path, use_sudo, temp_dir):
# Decide what needs to be stripped from remote paths so they're all
# relative to the given remote_path
if os.path.basename(remote_path):
strip = os.path.dirname(remote_path)
else:
strip = os.path.dirname(os.path.dirname(remote_path))
# Store all paths gotten so we can return them when done
result = []
# Use our facsimile of os.walk to find all files within remote_path
for context, dirs, files in self.walk(remote_path):
# Normalize current directory to be relative
# E.g. remote_path of /var/log and current dir of /var/log/apache2
# would be turned into just 'apache2'
lcontext = rcontext = context.replace(strip, '', 1).lstrip('/')
# Prepend local path to that to arrive at the local mirrored
# version of this directory. So if local_path was 'mylogs', we'd
# end up with 'mylogs/apache2'
lcontext = os.path.join(local_path, lcontext)
# Download any files in current directory
for f in files:
# Construct full and relative remote paths to this file
rpath = posixpath.join(context, f)
rremote = posixpath.join(rcontext, f)
# If local_path isn't using a format string that expands to
# include its remote path, we need to add it here.
if "%(path)s" not in local_path \
and "%(dirname)s" not in local_path:
lpath = os.path.join(lcontext, f)
# Otherwise, just passthrough local_path to self.get()
else:
lpath = local_path
# Now we can make a call to self.get() with specific file paths
# on both ends.
result.append(self.get(rpath, lpath, use_sudo, True, rremote, temp_dir))
return result
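    # Illustrative note (hypothetical paths, not part of the original source):
    # for a recursive download of remote_path="/var/log", a local_path such as
    # "backups/%(path)s" is passed straight through to self.get(), which
    # expands the %(path)s placeholder per file, whereas a plain
    # local_path="backups/" relies on the lcontext/basename joining above to
    # place each downloaded file under that directory.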
def put(self, local_path, remote_path, use_sudo, mirror_local_mode, mode,
local_is_path, temp_dir):
from fabric.api import sudo, hide
pre = self.ftp.getcwd()
pre = pre if pre else ''
if local_is_path and self.isdir(remote_path):
basename = os.path.basename(local_path)
remote_path = posixpath.join(remote_path, basename)
if output.running:
print("[%s] put: %s -> %s" % (
env.host_string,
_format_local(local_path, local_is_path),
posixpath.join(pre, remote_path)
))
# When using sudo, "bounce" the file through a guaranteed-unique file
# path in the default remote CWD (which, typically, the login user will
# have write permissions on) in order to sudo(mv) it later.
if use_sudo:
target_path = remote_path
remote_path = posixpath.join(temp_dir, uuid.uuid4().hex)
        # Read, ensuring we handle file-like objects correctly re: seek pointer
putter = self.ftp.put
if not local_is_path:
old_pointer = local_path.tell()
local_path.seek(0)
putter = self.ftp.putfo
rattrs = putter(local_path, remote_path)
if not local_is_path:
local_path.seek(old_pointer)
# Handle modes if necessary
if (local_is_path and mirror_local_mode) or (mode is not None):
lmode = os.stat(local_path).st_mode if mirror_local_mode else mode
# Cast to octal integer in case of string
if isinstance(lmode, basestring):
lmode = int(lmode, 8)
lmode = lmode & 07777
rmode = rattrs.st_mode
# Only bitshift if we actually got an rmode
if rmode is not None:
rmode = (rmode & 07777)
if lmode != rmode:
if use_sudo:
# Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv
# command. (The target path has already been cwd-ified
# elsewhere.)
with settings(hide('everything'), cwd=""):
sudo('chmod %o \"%s\"' % (lmode, remote_path))
else:
self.ftp.chmod(remote_path, lmode)
if use_sudo:
# Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv command.
# (The target path has already been cwd-ified elsewhere.)
with settings(hide('everything'), cwd=""):
sudo("mv \"%s\" \"%s\"" % (remote_path, target_path))
# Revert to original remote_path for return value's sake
remote_path = target_path
return remote_path
def put_dir(self, local_path, remote_path, use_sudo, mirror_local_mode,
mode, temp_dir):
if os.path.basename(local_path):
strip = os.path.dirname(local_path)
else:
strip = os.path.dirname(os.path.dirname(local_path))
remote_paths = []
for context, dirs, files in os.walk(local_path):
rcontext = context.replace(strip, '', 1)
# normalize pathname separators with POSIX separator
rcontext = rcontext.replace(os.sep, '/')
rcontext = rcontext.lstrip('/')
rcontext = posixpath.join(remote_path, rcontext)
if not self.exists(rcontext):
self.mkdir(rcontext, use_sudo)
for d in dirs:
n = posixpath.join(rcontext, d)
if not self.exists(n):
self.mkdir(n, use_sudo)
for f in files:
local_path = os.path.join(context, f)
n = posixpath.join(rcontext, f)
p = self.put(local_path, n, use_sudo, mirror_local_mode, mode,
True, temp_dir)
remote_paths.append(p)
return remote_paths
|
|
# get the splines
import numpy as np
import scipy.interpolate as si
# TODO - BSpline.predict() -> allow x to be of any shape. return.shape = in.shape + (n_bases)
# MAYBE TODO - implement si.splev using keras.backend.
# - That way you don't have to hash the X_spline in memory.
class BSpline():
"""Class for computing the B-spline funcions b_i(x) and
constructing the penality matrix S.
# Arguments
start: float or int; start of the region
end: float or int; end of the region
n_bases: int; number of spline bases
spline_order: int; spline order
# Methods
- **getS(add_intercept=False)** - Get the penalty matrix S
- Arguments
- **add_intercept**: bool. If true, intercept column is added to the returned matrix.
- Returns
- `np.array`, of shape `(n_bases + add_intercept, n_bases + add_intercept)`
- **predict(x, add_intercept=False)** - For some x, predict the bn(x) for each base
- Arguments
- **x**: np.array; Vector of dimension 1
            - **add_intercept**: bool; If True, intercept column is added to the final array
- Returns
- `np.array`, of shape `(len(x), n_bases + (add_intercept))`
"""
def __init__(self, start=0, end=1, n_bases=10, spline_order=3):
self.start = start
self.end = end
self.n_bases = n_bases
self.spline_order = spline_order
self.knots = get_knots(self.start, self.end, self.n_bases, self.spline_order)
self.S = get_S(self.n_bases, self.spline_order, add_intercept=False)
def __repr__(self):
return "BSpline(start={0}, end={1}, n_bases={2}, spline_order={3})".\
format(self.start, self.end, self.n_bases, self.spline_order)
def getS(self, add_intercept=False):
"""Get the penalty matrix S
Returns
np.array, of shape (n_bases + add_intercept, n_bases + add_intercept)
"""
S = self.S
if add_intercept is True:
# S <- cbind(0, rbind(0, S)) # in R
zeros = np.zeros_like(S[:1, :])
S = np.vstack([zeros, S])
zeros = np.zeros_like(S[:, :1])
S = np.hstack([zeros, S])
return S
def predict(self, x, add_intercept=False):
"""For some x, predict the bn(x) for each base
Arguments:
x: np.array; Vector of dimension 1
add_intercept: bool; should we add the intercept to the final array
Returns:
np.array, of shape (len(x), n_bases + (add_intercept))
"""
# sanity check
if x.min() < self.start:
raise Warning("x.min() < self.start")
if x.max() > self.end:
raise Warning("x.max() > self.end")
return get_X_spline(x=x,
knots=self.knots,
n_bases=self.n_bases,
spline_order=self.spline_order,
add_intercept=add_intercept)
def get_config(self):
return {"start": self.start,
"end": self.end,
"n_bases": self.n_bases,
"spline_order": self.spline_order
}
@classmethod
def from_config(cls, config):
return cls(**config)
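# A minimal usage sketch (illustrative only; this helper and its values are
# not part of the original module):
def _bspline_usage_example():
    """Show the array shapes produced by BSpline.predict() and BSpline.getS()."""
    bs = BSpline(start=0, end=10, n_bases=5, spline_order=3)
    x = np.linspace(0, 10, 50)
    X = bs.predict(x, add_intercept=True)  # shape (50, 5 + 1)
    S = bs.getS(add_intercept=True)        # shape (5 + 1, 5 + 1)
    return X.shape, S.shape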
############################################
# core functions
def get_gam_splines(start=0, end=100, n_bases=10, spline_order=3, add_intercept=True):
"""Main function required by (TF)Concise class
"""
# make sure n_bases is an int
assert type(n_bases) == int
x = np.arange(start, end + 1)
knots = get_knots(start, end, n_bases, spline_order)
X_splines = get_X_spline(x, knots, n_bases, spline_order, add_intercept)
S = get_S(n_bases, spline_order, add_intercept)
# Get the same knot positions as with mgcv
# https://github.com/cran/mgcv/blob/master/R/smooth.r#L1560
return X_splines, S, knots
############################################
# helper functions
# main resource:
# https://github.com/cran/mgcv/blob/master/R/smooth.r#L1560
def get_knots(start, end, n_bases=10, spline_order=3):
"""
Arguments:
x; np.array of dim 1
"""
x_range = end - start
start = start - x_range * 0.001
end = end + x_range * 0.001
# mgcv annotation
m = spline_order - 1
nk = n_bases - m # number of interior knots
dknots = (end - start) / (nk - 1)
knots = np.linspace(start=start - dknots * (m + 1),
stop=end + dknots * (m + 1),
num=nk + 2 * m + 2)
return knots.astype(np.float32)
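# Worked example (values follow from the code above): with n_bases=10 and
# spline_order=3 we get m = 2 and nk = 8, so get_knots() returns
# nk + 2*m + 2 = 14 evenly spaced knots, extending m + 1 knot spacings beyond
# each end of the (slightly widened) [start, end] interval.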
# - get knots as arguments
def get_X_spline(x, knots, n_bases=10, spline_order=3, add_intercept=True):
"""
Returns:
np.array of shape [len(x), n_bases + (add_intercept)]
# BSpline formula
https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline
Fortran code:
https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
"""
    if len(x.shape) != 1:
        raise ValueError("x has to be 1 dimensional")
tck = [knots, np.zeros(n_bases), spline_order]
X = np.zeros([len(x), n_bases])
for i in range(n_bases):
vec = np.zeros(n_bases)
vec[i] = 1.0
tck[1] = vec
X[:, i] = si.splev(x, tck, der=0)
if add_intercept is True:
ones = np.ones_like(X[:, :1])
X = np.hstack([ones, X])
return X.astype(np.float32)
def get_S(n_bases=10, spline_order=3, add_intercept=True):
# mvcv R-code
# S<-diag(object$bs.dim);
# if (m[2]) for (i in 1:m[2]) S <- diff(S)
# object$S <- list(t(S)%*%S) # get penalty
# object$S[[1]] <- (object$S[[1]]+t(object$S[[1]]))/2 # exact symmetry
S = np.identity(n_bases)
m2 = spline_order - 1 # m[2] is the same as m[1] by default
# m2 order differences
for i in range(m2):
S = np.diff(S, axis=0) # same as diff() in R
S = np.dot(S.T, S)
S = (S + S.T) / 2 # exact symmetry
if add_intercept is True:
# S <- cbind(0, rbind(0, S)) # in R
zeros = np.zeros_like(S[:1, :])
S = np.vstack([zeros, S])
zeros = np.zeros_like(S[:, :1])
S = np.hstack([zeros, S])
return S.astype(np.float32)
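# Worked example (follows from the construction above): for n_bases=3 and
# spline_order=3 the second-difference operator is D = [1, -2, 1], so
# get_S(3, 3, add_intercept=False) returns
#     [[ 1, -2,  1],
#      [-2,  4, -2],
#      [ 1, -2,  1]]
# i.e. S = D^T D, the usual second-order difference penalty.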
|
|
import logging
from subprocess import CalledProcessError
import tarfile
"""
Handles deployment of an installed problem.
Deploying a problem means creating one or more instances, which are each
templated with flags, the shell server URL, etc., and assigned a port
(if required for their problem type).
Flags and assigned ports will remain consistent for (problem, instance) pairs
across any shell servers that share the SHARED_ROOT directory.
However, instances must still be created individually on each shell server,
as server URLs must be templated appropriately, dependencies potentially
need to be installed on each server, and the underlying files, users and
service definitions that make up a deployed instance are specific to each
shell server.
"""
HIGHEST_PORT = 65535
LOWEST_PORT = 1025
CONTAINER_PORT = 5000
LOCALHOST = "127.0.0.1"
PROBLEM_FILES_DIR = "problem_files"
STATIC_FILE_ROOT = "static"
XINETD_SERVICE_PATH = "/etc/xinetd.d/"
TEMP_DEB_DIR = "/tmp/picoctf_debs/"
FLAG_FMT = "%s"
# will be set to the configuration module during deployment
shared_config = None
local_config = None
port_map = {}
current_problem = None
current_instance = None
containerize = False
logger = logging.getLogger(__name__)
def get_deploy_context():
"""
Returns the deployment context, a dictionary containing the current
config, port_map, problem, instance
"""
global shared_config, local_config, port_map, current_problem, current_instance
return {
"shared_config": shared_config,
"local_config": local_config,
"port_map": port_map,
"problem": current_problem,
"instance": current_instance,
}
port_random = None
# checks if the port is being used by a system process
def check_if_port_in_use(port):
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((LOCALHOST, port))
    except socket.error:
        return True
    finally:
        s.close()
    return False
def flag_fmt():
"""Used to shim the command line passed flag format into the challenge class"""
return FLAG_FMT
def give_port():
"""
Returns a random port and registers it, unless running in a container which
always sets the port to a constant CONTAINER_PORT.
"""
global port_random
if containerize:
logger.debug(
f"Running in a container. Assigning fixed port: {CONTAINER_PORT}"
)
return CONTAINER_PORT
context = get_deploy_context()
# default behavior
if context["shared_config"] is None:
return randint(LOWEST_PORT, HIGHEST_PORT)
if "banned_ports_parsed" not in context["shared_config"]:
banned_ports_result = []
for port_range in context["shared_config"].banned_ports:
banned_ports_result.extend(
list(range(port_range["start"], port_range["end"] + 1))
)
context["shared_config"]["banned_ports_parsed"] = banned_ports_result
# during real deployment, let's register a port
if port_random is None:
port_random = Random(context["shared_config"].deploy_secret)
# if this instance already has a port, reuse it
if (context["problem"], context["instance"]) in context["port_map"]:
assigned_port = context["port_map"][(context["problem"], context["instance"])]
if assigned_port is not None:
logger.debug(
f"This problem instance ({context['problem']}: {str(context['instance'])}) already has an assigned port: {str(assigned_port)}"
)
return assigned_port
used_ports = [port for port in context["port_map"].values() if port is not None]
if (
len(used_ports) + len(context["shared_config"].banned_ports_parsed)
== HIGHEST_PORT + 1
):
raise Exception("All usable ports are taken. Cannot deploy any more instances.")
    # Add used ports to banned_ports_parsed.
for port in used_ports:
context["shared_config"].banned_ports_parsed.append(port)
# in case the port chosen is in use, try again.
loop_var = HIGHEST_PORT - len(context["shared_config"].banned_ports_parsed) + 1
while loop_var > 0:
        # Pick a random port that is not banned, not already in use, and not previously assigned.
port = port_random.choice(
[
i
for i in range(LOWEST_PORT, HIGHEST_PORT)
if i not in context["shared_config"].banned_ports_parsed
]
)
if check_if_port_in_use(port):
loop_var -= 1
context["shared_config"].banned_ports_parsed.append(port)
continue
return port
raise Exception(
"Unable to assigned a port to this problem. All ports are either taken or used by the system."
)
import functools
import json
import os
import shutil
import subprocess
import traceback
from abc import ABCMeta
from ast import literal_eval
from copy import copy, deepcopy
from grp import getgrnam
from hashlib import md5, sha1
from importlib.machinery import SourceFileLoader
# These are below because of a circular import issue with problem.py and give_port
# [TODO] cleanup
from os.path import commonprefix, isdir, isfile, join
from pwd import getpwnam
from random import randint, Random
from time import sleep
from hacksport.operations import create_user, execute
from hacksport.problem import (
Compiled,
Directory,
ExecutableFile,
File,
FlaskApp,
GroupWriteDirectory,
PHPApp,
WebService,
PreTemplatedFile,
ProtectedFile,
Remote,
Service,
)
# must follow hacksport.problem due to dependency on Challenge
from hacksport.docker import DockerChallenge
from hacksport.status import get_all_problem_instances, get_all_problems
from jinja2 import Environment, FileSystemLoader, Template
from shell_manager.package import package_problem
from shell_manager.util import (
DEPLOYED_ROOT,
FatalException,
get_attributes,
get_problem,
get_problem_root,
sanitize_name,
STAGING_ROOT,
get_problem_root_hashed,
get_pid_hash,
get_bundle,
DEB_ROOT,
SHARED_ROOT,
get_shared_config,
get_local_config,
acquire_lock,
release_lock,
)
from spur import RunProcessError
PORT_MAP_PATH = join(SHARED_ROOT, "port_map.json")
def challenge_meta(attributes):
"""
Returns a metaclass that will introduce the given attributes into the class
namespace.
Args:
attributes: The dictionary of attributes
Returns:
The metaclass described above
"""
class ChallengeMeta(ABCMeta):
def __new__(cls, name, bases, attr):
attrs = dict(attr)
attrs.update(attributes)
return super().__new__(cls, name, bases, attrs)
return ChallengeMeta
def update_problem_class(Class, problem_object, seed, user, instance_directory):
"""
Changes the metaclass of the given class to introduce necessary fields before
object instantiation.
Args:
Class: The problem class to be updated
        problem_object: The problem object (the contents of problem.json)
seed: The seed for the Random object
user: The linux username for this challenge instance
instance_directory: The deployment directory for this instance
Returns:
The updated class described above
"""
random = Random(seed)
attributes = deepcopy(problem_object)
# pass configuration options in as class fields
attributes.update(dict(shared_config))
attributes.update(dict(local_config))
attributes.update(
{
"random": random,
"user": user,
"directory": instance_directory,
"server": local_config.hostname,
}
)
return challenge_meta(attributes)(Class.__name__, Class.__bases__, Class.__dict__)
def get_username(problem_name, instance_number):
"""
Determine the username for a given problem instance.
    Because useradd limits Linux usernames to 32 characters, the generated
    username is truncated to 28 characters. This leaves room for up to 1000
    instances of problems whose names require truncation.
"""
username = "{}_{}".format(sanitize_name(problem_name)[0:28], instance_number)
if len(username) > 32:
raise Exception(
"Unable to create more than 1000 instances of this problem. Shorten problem name.")
return username
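# Illustrative example (hypothetical problem name): a sanitized name of
# "super_long_web_exploitation_problem" is cut to its first 28 characters,
# so instance 7 becomes "super_long_web_exploitation__7" (30 characters),
# staying within the 32-character useradd limit even for three-digit
# instance numbers.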
def create_service_files(problem, instance_number, path):
"""
    Creates an xinetd service file for the given problem instance.
Args:
problem: the instantiated problem object
instance_number: the instance number
path: the location to drop the service file
Returns:
A tuple containing (service_file_path, socket_file_path).
socket_file_path will be None if the problem is not a service.
"""
# See https://github.com/puppetlabs/puppetlabs-xinetd/blob/master/templates/service.erb
# and https://linux.die.net/man/5/xinetd.conf
xinetd_template = """
service %s
{
type = UNLISTED
port = %d
disable = no
socket_type = stream
protocol = tcp
wait = %s
user = %s
group = %s
log_type = FILE /var/log/xinetd-hacksport-%s.log
log_on_success = HOST EXIT DURATION
log_on_failure = HOST
cps = 50 3
rlimit_cpu = %s
per_source = 100
server = %s
}
"""
is_service = isinstance(problem, Service)
is_web = isinstance(problem, WebService)
if not is_service and not is_web:
return (None, None)
if getattr(problem, "skip_service_file_creation", False):
return (None, None)
problem_service_info = problem.service()
service_content = xinetd_template % (
problem.user,
problem.port,
"no" if problem_service_info["Type"] == "oneshot" else "yes",
problem.user,
problem.user,
problem.user,
"100" if problem_service_info["Type"] == "oneshot" else "UNLIMITED",
problem_service_info["ExecStart"],
)
service_file_path = join(path, "{}".format(problem.user))
with open(service_file_path, "w") as f:
f.write(service_content)
return (service_file_path, None)
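# Illustrative rendering of the xinetd template above (hypothetical user,
# port and ExecStart values) for a "oneshot" service; note that wait=no and
# rlimit_cpu=100 come from the oneshot branch of the formatting tuple:
#
#   service myproblem_0
#   {
#       type        = UNLISTED
#       port        = 12345
#       wait        = no
#       user        = myproblem_0
#       group       = myproblem_0
#       rlimit_cpu  = 100
#       server      = /problems/myproblem_0/start.sh
#   }
# (remaining fields are rendered exactly as written in the template)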
def create_instance_user(problem_name, instance_number):
"""
    Ensures a Linux user exists for this problem instance, creating it if
    necessary. The username is deterministic for a (problem, instance) pair.
Args:
problem_name: The name of the problem
instance_number: The unique number for this instance
Returns:
        A tuple (username, new), where new indicates whether the user was created
"""
converted_name = sanitize_name(problem_name)
username = get_username(converted_name, instance_number)
try:
# Check if the user already exists.
user = getpwnam(username)
new = False
except KeyError:
create_user(username)
new = True
return username, new
def generate_instance_deployment_directory(username):
"""
Generates the instance deployment directory for the given username
"""
directory = username
if shared_config.obfuscate_problem_directories:
directory = (
username
+ "_"
+ md5((username + shared_config.deploy_secret).encode()).hexdigest()
)
root_dir = shared_config.problem_directory_root
if not isdir(root_dir):
os.makedirs(root_dir)
# make the root not world readable
os.chmod(root_dir, 0o751)
path = join(root_dir, directory)
if not isdir(path):
os.makedirs(path)
return path
def generate_seed(*args):
"""
Generates a seed using the list of string arguments
"""
return md5("".join(args).encode("utf-8")).hexdigest()
def generate_staging_directory(
root=STAGING_ROOT, problem_name=None, instance_number=None
):
"""
Creates a random, empty staging directory
Args:
root: The parent directory for the new directory. Defaults to join(SHARED_ROOT, "staging")
Optional prefixes to help identify the staging directory: problem_name, instance_number
Returns:
The path of the generated directory
"""
if not os.path.isdir(root):
os.makedirs(root)
# ensure that the staging files are not world-readable
os.chmod(root, 0o750)
def get_new_path():
prefix = ""
if problem_name is not None:
prefix += problem_name + "_"
if instance_number is not None:
prefix += str(instance_number) + "_"
        path = join(root, prefix + str(randint(0, 10 ** 16)))
if os.path.isdir(path):
return get_new_path()
return path
path = get_new_path()
os.makedirs(path)
return path
def template_string(template, **kwargs):
"""
Templates the given string with the keyword arguments
Args:
template: The template string
        **kwargs: Variables to use in templating
"""
temp = Template(template)
return temp.render(**kwargs)
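# Example (hypothetical template and value): template_string("flag: {{ flag }}",
# flag="abc") renders to "flag: abc" via Jinja2.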
def template_file(in_file_path, out_file_path, **kwargs):
"""
Templates the given file with the keyword arguments.
Args:
in_file_path: The path to the template
out_file_path: The path to output the templated file
**kwargs: Variables to use in templating
"""
env = Environment(
loader=FileSystemLoader(os.path.dirname(in_file_path)),
keep_trailing_newline=True,
)
template = env.get_template(os.path.basename(in_file_path))
output = template.render(**kwargs)
with open(out_file_path, "w") as f:
f.write(output)
def template_staging_directory(staging_directory, problem):
"""
Templates every file in the staging directory recursively other than
problem.json and challenge.py.
Args:
staging_directory: The path of the staging directory
problem: The problem object
"""
    # prepend the staging directory to all of the directories that should not be templated
dont_template = copy(problem.dont_template) + [
"app/templates",
"problem.json",
"challenge.py",
"templates",
"__pre_templated",
]
dont_template_files = list(filter(isfile, dont_template))
dont_template_directories = list(filter(isdir, dont_template))
dont_template_directories = [
join(staging_directory, directory) for directory in dont_template_directories
]
for root, dirnames, filenames in os.walk(staging_directory):
if any(
os.path.commonprefix([root, path]) == path
for path in dont_template_directories
):
logger.debug(
"....Not templating anything in the directory '{}'".format(root)
)
continue
for filename in filenames:
if filename in dont_template_files:
logger.debug("....Not templating the file '{}'".format(filename))
continue
fullpath = join(root, filename)
try:
template_file(fullpath, fullpath, **get_attributes(problem))
except UnicodeDecodeError as e:
# tried templating binary file
pass
def deploy_files(
staging_directory, instance_directory, file_list, username, problem_class
):
"""
Copies the list of files from the staging directory to the instance directory.
Will properly set permissions and setgid files based on their type.
"""
# get uid and gid for default and problem user
user = getpwnam(username)
default = getpwnam(shared_config.default_user)
for f in file_list:
# copy the file over, making the directories as needed
output_path = join(instance_directory, f.path)
if not os.path.isdir(os.path.dirname(output_path)):
os.makedirs(os.path.dirname(output_path))
if not isinstance(f, Directory):
if isinstance(f, PreTemplatedFile):
file_source = join(staging_directory, "__pre_templated", f.path)
else:
file_source = join(staging_directory, f.path)
shutil.copy2(file_source, output_path)
# set the ownership based on the type of file
if isinstance(f, ProtectedFile) or isinstance(f, ExecutableFile) or \
isinstance(f, GroupWriteDirectory):
os.chown(output_path, default.pw_uid, user.pw_gid)
else:
uid = default.pw_uid if f.user is None else getpwnam(f.user).pw_uid
gid = default.pw_gid if f.group is None else getgrnam(f.group).gr_gid
os.chown(output_path, uid, gid)
# set the permissions appropriately
os.chmod(output_path, f.permissions)
if issubclass(problem_class, Service):
os.chown(instance_directory, default.pw_uid, user.pw_gid)
os.chmod(instance_directory, 0o750)
def install_user_service(service_file, socket_file):
"""
    Installs the service file into the xinetd service directory.
    xinetd itself is restarted separately once all instances are deployed.
    Args:
        service_file: The path to the xinetd service file to install
        socket_file: Unused for xinetd deployments (always None)
"""
if service_file is None:
return
service_name = os.path.basename(service_file)
logger.debug("...Installing user service '%s'.", service_name)
# copy service file
service_path = os.path.join(XINETD_SERVICE_PATH, service_name)
shutil.copy2(service_file, service_path)
def generate_instance(
problem_object,
problem_directory,
instance_number,
staging_directory,
deployment_directory=None,
):
"""
Runs the setup functions of Problem in the correct order
Args:
problem_object: The contents of the problem.json
problem_directory: The directory to the problem
instance_number: The instance number to be generated
staging_directory: The temporary directory to store files in
deployment_directory: The directory that will be deployed to. Defaults to a deterministic, unique
directory generated for each problem,instance pair using the configuration options
PROBLEM_DIRECTORY_ROOT and OBFUSCATE_PROBLEM_DIRECTORIES
Returns:
A dict containing (problem, staging_directory, deployment_directory, files,
web_accessible_files, service_file, socket_file)
"""
logger.debug(
"Generating instance %d of problem '%s'.",
instance_number,
problem_object["unique_name"],
)
logger.debug("...Using staging directory %s", staging_directory)
username, new = create_instance_user(problem_object["name"], instance_number)
if new:
logger.debug("...Created problem user '%s'.", username)
else:
logger.debug("...Using existing problem user '%s'.", username)
if deployment_directory is None:
deployment_directory = generate_instance_deployment_directory(username)
logger.debug("...Using deployment directory '%s'.", deployment_directory)
seed = generate_seed(
problem_object["name"], shared_config.deploy_secret, str(instance_number)
)
logger.debug("...Generated random seed '%s' for deployment.", seed)
copy_path = join(staging_directory, PROBLEM_FILES_DIR)
shutil.copytree(problem_directory, copy_path)
pretemplated_directory = join(copy_path, "__pre_templated")
if isdir(pretemplated_directory):
shutil.rmtree(pretemplated_directory)
# store cwd to restore later
cwd = os.getcwd()
os.chdir(copy_path)
challenge = SourceFileLoader(
"challenge", join(copy_path, "challenge.py")
).load_module()
Problem = update_problem_class(
challenge.Problem, problem_object, seed, username, deployment_directory
)
# run methods in proper order
problem = Problem()
# reseed and generate flag
problem.flag = problem.generate_flag(Random(seed))
problem.flag_sha1 = sha1(problem.flag.encode("utf-8")).hexdigest()
logger.debug("...Instance %d flag is '%s'.", instance_number, problem.flag)
logger.debug("...Running problem initialize.")
problem.initialize()
shutil.copytree(copy_path, pretemplated_directory)
web_accessible_files = []
def url_for(
web_accessible_files, source_name, display=None, raw=False, pre_templated=False
):
if pre_templated:
source_path = join(copy_path, "__pre_templated", source_name)
else:
source_path = join(copy_path, source_name)
problem_hash = (
problem_object["name"] + shared_config.deploy_secret + str(instance_number)
)
problem_hash = md5(problem_hash.encode("utf-8")).hexdigest()
destination_path = join(STATIC_FILE_ROOT, problem_hash, source_name)
link_template = "<a href='{}'>{}</a>"
web_accessible_files.append(
(source_path, join(shared_config.web_root, destination_path))
)
uri_prefix = "//"
uri = join(uri_prefix, local_config.hostname, destination_path)
if not raw:
return link_template.format(
uri, source_name if display is None else display
)
return uri
problem.url_for = functools.partial(url_for, web_accessible_files)
logger.debug("...Templating the staging directory")
template_staging_directory(copy_path, problem)
if isinstance(problem, Compiled):
problem.compiler_setup()
if isinstance(problem, Remote):
problem.remote_setup()
if isinstance(problem, FlaskApp):
problem.flask_setup()
if isinstance(problem, PHPApp):
problem.php_setup()
if isinstance(problem, Service):
problem.service_setup()
logger.debug("...Running problem setup.")
problem.setup()
os.chdir(cwd)
all_files = copy(problem.files)
if isinstance(problem, Compiled):
all_files.extend(problem.compiled_files)
if isinstance(problem, Service):
all_files.extend(problem.service_files)
if not all([isinstance(f, File) for f in all_files]):
logger.error("All files must be created using the File class!")
raise FatalException
for f in all_files:
if not isinstance(f, Directory) and not os.path.isfile(join(copy_path, f.path)):
logger.error("File '%s' does not exist on the file system!", f)
service_file, socket_file = create_service_files(
problem, instance_number, staging_directory
)
logger.debug("...Created service files '%s','%s'.", service_file, socket_file)
# template the description
# change newline for <br>, otherwise it won't render on the pico website
problem.description = template_string(
problem.description, **get_attributes(problem)
).replace("\n", "<br>")
problem.hints = [template_string(hint, **get_attributes(problem)).replace("\n", "<br>") for hint in problem.hints]
logger.debug("...Instance description: %s", problem.description)
logger.debug("...Instance hints: %s", problem.hints)
# Steps to meet cmgr interface
if containerize:
# Create /challenge directory
try:
os.mkdir("/challenge", 0o700)
except FileExistsError:
logger.warn("/challenge already exists in container")
# Write flag into /challenge/metadata.json
with open("/challenge/metadata.json", "w") as out:
metadata = {"flag": problem.flag}
json.dump(metadata, out)
# Collect web_accessible_files into /challenge/artifacts.tar.gz
if len(web_accessible_files) >= 1:
logger.debug(f"Collecting web accessible files to artifacts.tar.gz")
with tarfile.open("/challenge/artifacts.tar.gz", "w:gz") as tar:
for f, _ in web_accessible_files:
tar.add(f, arcname=os.path.basename(f))
return {
"problem": problem,
"staging_directory": staging_directory,
"deployment_directory": deployment_directory,
"files": all_files,
"web_accessible_files": web_accessible_files,
"service_file": service_file,
"socket_file": socket_file,
}
def deploy_problem(
problem_directory,
instances=None,
test=False,
deployment_directory=None,
debug=False,
restart_xinetd=True,
containerize=False,
):
"""
Deploys the problem specified in problem_directory.
Args:
problem_directory: The directory storing the problem
instances: The list of instances to deploy. Defaults to [0]
test: Whether the instances are test instances. Defaults to False.
deployment_directory: If not None, the challenge will be deployed here
instead of their home directory
debug: Output debug info
restart_xinetd: Whether to restart xinetd upon deployment of this set
of instances for a problem. Defaults True as used by
tests, but typically is used with False from
deploy_problems, which takes in multiple problems.
        containerize: Deployment is occurring in a container. This flag is
                      used when containerizing and by external tools like
                      cmgr that deploy challenges in an isolated environment.
"""
if instances is None:
instances = [0]
global current_problem, current_instance, port_map
problem_object = get_problem(problem_directory)
current_problem = problem_object["unique_name"]
instance_list = []
need_restart_xinetd = False
logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])
problem_deb_location = (
os.path.join(DEB_ROOT, sanitize_name(problem_object["unique_name"])) + ".deb"
)
try:
subprocess.run(
"DEBIAN_FRONTEND=noninteractive apt-get -y install "
+ f"--reinstall {problem_deb_location}",
shell=True,
check=True,
stdout=subprocess.PIPE,
)
except subprocess.CalledProcessError:
logger.error("An error occurred while installing problem packages.")
raise FatalException
logger.debug("Reinstalled problem's deb package to fulfill dependencies")
for instance_number in instances:
current_instance = instance_number
staging_directory = generate_staging_directory(
problem_name=problem_object["name"], instance_number=instance_number
)
if test and deployment_directory is None:
deployment_directory = join(staging_directory, "deployed")
instance = generate_instance(
problem_object,
problem_directory,
instance_number,
staging_directory,
deployment_directory=deployment_directory,
)
instance_list.append((instance_number, instance))
deployment_json_dir = join(
DEPLOYED_ROOT,
"{}-{}".format(
sanitize_name(problem_object["name"]), get_pid_hash(problem_object, True)
),
)
if not os.path.isdir(deployment_json_dir):
os.makedirs(deployment_json_dir)
# ensure that the deployed files are not world-readable
os.chmod(DEPLOYED_ROOT, 0o750)
# all instances generated without issue. let's do something with them
for instance_number, instance in instance_list:
problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
problem = instance["problem"]
deployment_directory = instance["deployment_directory"]
logger.debug(
"...Copying problem files %s to deployment directory %s.",
instance["files"],
deployment_directory,
)
deploy_files(
problem_path,
deployment_directory,
instance["files"],
problem.user,
problem.__class__,
)
if test:
logger.info("Test instance %d information:", instance_number)
logger.info("...Description: %s", problem.description)
logger.info("...Deployment Directory: %s", deployment_directory)
logger.debug("Cleaning up test instance side-effects.")
logger.debug("...Killing user processes.")
# This doesn't look great.
try:
execute("killall -u {}".format(problem.user))
sleep(0.1)
except RunProcessError as e:
pass
logger.debug("...Removing test user '%s'.", problem.user)
execute(["userdel", problem.user])
deployment_json_dir = instance["staging_directory"]
else:
# copy files to the web root
logger.debug(
"...Copying web accessible files: %s", instance["web_accessible_files"]
)
for source, destination in instance["web_accessible_files"]:
if not os.path.isdir(os.path.dirname(destination)):
os.makedirs(os.path.dirname(destination))
shutil.copy2(source, destination)
if instance["service_file"] is not None:
install_user_service(instance["service_file"], instance["socket_file"])
                # set to True; this signals that xinetd needs a restart
need_restart_xinetd = True
# keep the staging directory if run with debug flag
# this can still be cleaned up by running "shell_manager clean"
if not debug:
shutil.rmtree(instance["staging_directory"])
deployment_info = {
"user": problem.user,
"deployment_directory": deployment_directory,
"service": None
if instance["service_file"] is None
else os.path.basename(instance["service_file"]),
"socket": None
if instance["socket_file"] is None
else os.path.basename(instance["socket_file"]),
"server": problem.server,
"description": problem.description,
"hints": problem.hints,
"flag": problem.flag,
"flag_sha1": problem.flag_sha1,
"instance_number": instance_number,
"should_symlink": not isinstance(problem, Service)
and len(instance["files"]) > 0,
"files": [f.to_dict() for f in instance["files"]],
"docker_challenge": isinstance(problem, DockerChallenge)
}
if isinstance(problem, Service):
deployment_info["port"] = problem.port
logger.debug("...Port %d has been allocated.", problem.port)
# pass along image digest so webui can launch the correct image
if isinstance(problem, DockerChallenge):
deployment_info["instance_digest"] = problem.image_digest
deployment_info["port_info"] = {n: p.dict() for n, p in problem.ports.items()}
port_map[(current_problem, instance_number)] = deployment_info.get("port", None)
instance_info_path = os.path.join(
deployment_json_dir, "{}.json".format(instance_number)
)
with open(instance_info_path, "w") as f:
f.write(json.dumps(deployment_info, indent=4, separators=(", ", ": ")))
logger.debug(
"The instance deployment information can be found at '%s'.",
instance_info_path,
)
# restart xinetd
if restart_xinetd and need_restart_xinetd:
execute(["service", "xinetd", "restart"], timeout=60)
logger.info(
"Problem instances %s were successfully deployed for '%s'.",
instances,
problem_object["unique_name"],
)
return need_restart_xinetd
def deploy_init(contain):
global shared_config, local_config, port_map, containerize
containerize = contain
shared_config = get_shared_config()
local_config = get_local_config()
# Attempt to load the port_map from file
try:
with open(PORT_MAP_PATH, "r") as f:
port_map = json.load(f)
port_map = {literal_eval(k): v for k, v in port_map.items()}
except FileNotFoundError:
# If it does not exist, create it
for path, problem in get_all_problems().items():
for instance in get_all_problem_instances(path):
port_map[
(problem["unique_name"], instance["instance_number"])
] = instance.get("port", None)
with open(PORT_MAP_PATH, "w") as f:
stringified_port_map = {repr(k): v for k, v in port_map.items()}
json.dump(stringified_port_map, f)
except IOError:
logger.error(f"Error loading port map from {PORT_MAP_PATH}")
raise
return shared_config, local_config, port_map
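# Illustrative note on the on-disk port map format (the key below is
# hypothetical): in memory the map is keyed by (unique_name, instance_number)
# tuples, but JSON cannot encode tuple keys, so they are stored via repr()
# and parsed back with ast.literal_eval, e.g.
#     {"('toy-problem-abcd1234', 0)": 31337}
# round-trips to {('toy-problem-abcd1234', 0): 31337}.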
def deploy_problems(args):
""" Main entrypoint for problem deployment """
global FLAG_FMT
if args.flag_format:
FLAG_FMT = args.flag_format
logger.info(f"Deploying with custom flag format: {FLAG_FMT}")
shared_config, local_config, port_map = deploy_init(args.containerize)
need_restart_xinetd = False
try:
user = getpwnam(shared_config.default_user)
except KeyError as e:
logger.info(
"default_user '%s' does not exist. Creating the user now.",
shared_config.default_user,
)
create_user(shared_config.default_user)
problem_names = args.problem_names
if len(problem_names) == 1 and problem_names[0] == "all":
# Shortcut to deploy n instances of all problems
problem_names = [v["unique_name"] for k, v in get_all_problems().items()]
if args.instances:
instance_list = args.instances
else:
instance_list = list(range(0, args.num_instances))
if args.containerize and (len(problem_names) > 1 or len(instance_list) > 1):
logger.error("can only deploy a single instance per container")
return
acquire_lock()
try:
for problem_name in problem_names:
if not isdir(get_problem_root(problem_name, absolute=True)):
logger.error(f"'{problem_name}' is not an installed problem")
continue
source_location = get_problem_root(problem_name, absolute=True)
problem_object = get_problem(source_location)
instances_to_deploy = copy(instance_list)
is_static_flag = problem_object.get("static_flag", False)
if is_static_flag is True:
instances_to_deploy = [0]
# Avoid redeploying already-deployed instances
if not args.redeploy:
already_deployed = set()
for instance in get_all_problem_instances(problem_name):
already_deployed.add(instance["instance_number"])
instances_to_deploy = list(set(instances_to_deploy) - already_deployed)
if instances_to_deploy:
deploy_problem(
source_location,
instances=instances_to_deploy,
test=args.dry,
debug=args.debug,
restart_xinetd=False,
containerize=args.containerize
)
else:
logger.info(
"No additional instances to deploy for '%s'.",
problem_object["unique_name"],
)
finally:
        # Restart xinetd unless --no-restart was specified; in that case the service must be restarted manually
if not args.no_restart:
execute(["service", "xinetd", "restart"], timeout=60)
# Write out updated port map
with open(PORT_MAP_PATH, "w") as f:
stringified_port_map = {repr(k): v for k, v in port_map.items()}
json.dump(stringified_port_map, f)
release_lock()
def remove_instance_state(instance):
""" Removes state for an instance that is deployed to a host.
Includes: service files, deployment directory, users
"""
# Remove the xinetd service definition
service = instance["service"]
if service:
logger.debug("...Removing xinetd service '%s'.", service)
try:
os.remove(join(XINETD_SERVICE_PATH, service))
except FileNotFoundError:
logger.error("xinetd service definition missing, skipping")
# Remove the deployed instance directory
directory = instance["deployment_directory"]
logger.debug("...Removing deployment directory '%s'.", directory)
try:
shutil.rmtree(directory)
except FileNotFoundError:
logger.error("deployment directory missing, skipping")
# Kill any active instance processes
logger.debug(f"...Killing any instance processes")
try:
subprocess.check_output(f"pgrep -u {instance['user']} | xargs -r kill -15", shell=True)
except CalledProcessError as e:
logger.error(
"error killing processes, skipping - {}".format(str(e))
)
# Remove the problem user
user = instance["user"]
logger.debug("...Removing problem user '%s'.", user)
try:
execute(["userdel", user])
except RunProcessError as e:
logger.error(
"error removing problem user, skipping - {}".format(str(e))
)
def remove_instances(problem_name, instances_to_remove):
"""Remove all files and metadata for a given list of instances."""
deployed_instances = get_all_problem_instances(problem_name)
deployment_json_dir = join(DEPLOYED_ROOT, problem_name)
for instance in deployed_instances:
instance_number = instance["instance_number"]
if instance["instance_number"] in instances_to_remove:
logger.debug(f"Removing instance {instance_number} of {problem_name}")
containerize = 'containerize' in instance and instance['containerize']
if not containerize:
remove_instance_state(instance)
# Remove the internal instance metadata
deployment_json_path = join(
deployment_json_dir, "{}.json".format(instance_number)
)
logger.debug("...Removing instance metadata '%s'.", deployment_json_path)
os.remove(deployment_json_path)
logger.info(
"Problem instances %s were successfully removed for '%s'.",
instances_to_remove,
problem_name,
)
def undeploy_problems(args):
"""
Main entrypoint for problem undeployment
Does not remove the installed packages (apt-get remove [sanitized name with hash]).
Does not remove the problem from the web server (delete it from the mongo db).
"""
problem_names = args.problem_names
if len(problem_names) == 0:
logger.error("No problem name(s) specified")
raise FatalException
if len(problem_names) == 1 and problem_names[0] == "all":
# Shortcut to undeploy n instances of all problems
problem_names = [v["unique_name"] for k, v in get_all_problems().items()]
acquire_lock()
if args.instances:
instance_list = args.instances
else:
instance_list = list(range(0, args.num_instances))
try:
for problem_name in problem_names:
if not isdir(get_problem_root(problem_name, absolute=True)):
logger.error(f"'{problem_name}' is not an installed problem")
continue
instances_to_remove = copy(instance_list)
deployed_instances = set()
for instance in get_all_problem_instances(problem_name):
deployed_instances.add(instance["instance_number"])
instances_to_remove = list(
set(instances_to_remove).intersection(deployed_instances)
)
if len(instances_to_remove) == 0:
logger.warning(f"No deployed instances found for {problem_name}")
continue
remove_instances(problem_name, instances_to_remove)
finally:
execute(["service", "xinetd", "restart"], timeout=60)
release_lock()
|
|
import os
def add_source_files(self, sources, filetype, lib_env = None, shared = False):
import glob;
import string;
#if not lib_objects:
if not lib_env:
lib_env = self
if type(filetype) == type(""):
dir = self.Dir('.').abspath
list = glob.glob(dir + "/"+filetype)
for f in list:
sources.append( self.Object(f) )
else:
for f in filetype:
sources.append(self.Object(f))
def build_shader_header( target, source, env ):
for x in source:
print x
name = str(x)
name = name[ name.rfind("/")+1: ]
name = name[ name.rfind("\\")+1: ]
name = name.replace(".","_")
fs = open(str(x),"r")
fd = open(str(x)+".h","w")
fd.write("/* this file has been generated by SCons, do not edit! */\n")
fd.write("static const char *"+name+"=\n")
line=fs.readline()
while(line):
line=line.replace("\r","")
line=line.replace("\n","")
line=line.replace("\\","\\\\")
line=line.replace("\"","\\\"")
fd.write("\""+line+"\\n\"\n")
line=fs.readline()
fd.write(";\n")
return 0
def build_glsl_header( filename ):
fs = open(filename,"r")
line=fs.readline()
vertex_lines=[]
fragment_lines=[]
uniforms=[]
attributes=[]
fbos=[]
conditionals=[]
texunits=[]
texunit_names=[]
ubos=[]
ubo_names=[]
reading=""
line_offset=0
vertex_offset=0
fragment_offset=0
while(line):
if (line.find("[vertex]")!=-1):
reading="vertex"
line=fs.readline()
line_offset+=1
vertex_offset=line_offset
continue
if (line.find("[fragment]")!=-1):
reading="fragment"
line=fs.readline()
line_offset+=1
fragment_offset=line_offset
continue
if (line.find("#ifdef ")!=-1):
ifdefline = line.replace("#ifdef ","").strip()
if (not ifdefline in conditionals):
conditionals+=[ifdefline]
if (line.find("#elif defined(")!=-1):
ifdefline = line.replace("#elif defined(","").strip()
ifdefline = ifdefline.replace(")","").strip()
if (not ifdefline in conditionals):
conditionals+=[ifdefline]
import re
if re.search(r"^\s*uniform", line):
if (line.lower().find("texunit:")!=-1):
#texture unit
texunit = str(int( line[line.find(":")+1:].strip() ))
uline=line[:line.lower().find("//")]
uline = uline.replace("uniform","");
uline = uline.replace(";","");
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[ x.rfind(" ")+1: ]
if (x.find("[")!=-1):
                        #uniform array
x = x[ :x.find("[") ]
if (not x in texunit_names):
texunits+=[(x,texunit)]
texunit_names+=[x]
elif (line.lower().find("ubo:")!=-1):
#ubo
uboidx = str(int( line[line.find(":")+1:].strip() ))
uline=line[:line.lower().find("//")]
uline = uline[uline.find("uniform")+len("uniform"):];
uline = uline.replace(";","");
uline = uline.replace("{","");
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[ x.rfind(" ")+1: ]
if (x.find("[")!=-1):
                        #uniform array
x = x[ :x.find("[") ]
if (not x in ubo_names):
ubos+=[(x,uboidx)]
ubo_names+=[x]
else:
uline = line.replace("uniform","");
uline = uline.replace(";","");
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[ x.rfind(" ")+1: ]
if (x.find("[")!=-1):
                        #uniform array
x = x[ :x.find("[") ]
if (not x in uniforms):
uniforms+=[x]
if ((line.strip().find("in ")==0 or line.strip().find("attribute ")==0) and line.find("attrib:")!=-1):
uline = line.replace("in ","");
uline = uline.replace("attribute ","");
uline = uline.replace(";","");
uline = uline[ uline.find(" "): ].strip()
if (uline.find("//")!=-1):
name,bind = uline.split("//")
if (bind.find("attrib:")!=-1):
name=name.strip()
bind=bind.replace("attrib:","").strip()
attributes+=[(name,bind)]
if (line.strip().find("out ")==0):
uline = line.replace("out","").strip();
uline = uline.replace(";","");
uline = uline[ uline.find(" "): ].strip()
if (uline.find("//")!=-1):
name,bind = uline.split("//")
if (bind.find("drawbuffer:")!=-1):
name=name.strip()
bind=bind.replace("drawbuffer:","").strip()
fbos+=[(name,bind)]
line=line.replace("\r","")
line=line.replace("\n","")
line=line.replace("\\","\\\\")
line=line.replace("\"","\\\"")
        # line=line+"\\n\\"  (no longer needed)
if (reading=="vertex"):
vertex_lines+=[line]
if (reading=="fragment"):
fragment_lines+=[line]
line=fs.readline()
line_offset+=1
fs.close();
out_file = filename+".h"
fd = open(out_file,"w")
fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n");
out_file_base = out_file
out_file_base = out_file_base[ out_file_base.rfind("/")+1: ]
out_file_base = out_file_base[ out_file_base.rfind("\\")+1: ]
# print("out file "+out_file+" base " +out_file_base)
out_file_ifdef = out_file_base.replace(".","_").upper()
fd.write("#ifndef "+out_file_ifdef+"\n")
fd.write("#define "+out_file_ifdef+"\n")
out_file_class = out_file_base.replace(".glsl.h","").title().replace("_","").replace(".","")+"ShaderGL";
fd.write("\n\n");
fd.write("#include \"drivers/opengl/shader_gl.h\"\n\n\n");
fd.write("class "+out_file_class+" : public ShaderGL {\n\n");
fd.write("\t virtual String get_shader_name() const { return \""+out_file_class+"\"; }\n");
fd.write("public:\n\n");
if (len(conditionals)):
fd.write("\tenum Conditionals {\n");
for x in conditionals:
fd.write("\t\t"+x+",\n");
fd.write("\t};\n\n");
if (len(uniforms)):
fd.write("\tenum Uniforms {\n");
for x in uniforms:
fd.write("\t\t"+x.upper()+",\n");
fd.write("\t};\n\n");
fd.write("\t_FORCE_INLINE_ int get_uniform(Uniforms p_uniform) const { return _get_uniform(p_uniform); }\n\n");
if (len(conditionals)):
fd.write("\t_FORCE_INLINE_ void set_conditional(Conditionals p_conditional,bool p_enable) { _set_conditional(p_conditional,p_enable); }\n\n");
fd.write("\t#define _FU if (get_uniform(p_uniform)<0) return; ERR_FAIL_COND( get_active()!=this );\n\n ");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, double p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, unsigned long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Color& p_color) { _FU GLfloat col[4]={p_color.r,p_color.g,p_color.b,p_color.a}; glUniform4fv(get_uniform(p_uniform),1,col); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector2& p_vec2) { _FU GLfloat vec2[2]={p_vec2.x,p_vec2.y}; glUniform2fv(get_uniform(p_uniform),1,vec2); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector3& p_vec3) { _FU GLfloat vec3[3]={p_vec3.x,p_vec3.y,p_vec3.z}; glUniform3fv(get_uniform(p_uniform),1,vec3); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b) { _FU glUniform2f(get_uniform(p_uniform),p_a,p_b); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c) { _FU glUniform3f(get_uniform(p_uniform),p_a,p_b,p_c); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c, float p_d) { _FU glUniform4f(get_uniform(p_uniform),p_a,p_b,p_c,p_d); }\n\n");
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform& p_transform) { _FU
const Transform &tr = p_transform;
        GLfloat matrix[16]={ /* build a 4x4 matrix */
tr.basis.elements[0][0],
tr.basis.elements[1][0],
tr.basis.elements[2][0],
0,
tr.basis.elements[0][1],
tr.basis.elements[1][1],
tr.basis.elements[2][1],
0,
tr.basis.elements[0][2],
tr.basis.elements[1][2],
tr.basis.elements[2][2],
0,
tr.origin.x,
tr.origin.y,
tr.origin.z,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
""");
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Matrix32& p_transform) { _FU
const Matrix32 &tr = p_transform;
        GLfloat matrix[16]={ /* build a 4x4 matrix */
tr.elements[0][0],
tr.elements[0][1],
0,
0,
tr.elements[1][0],
tr.elements[1][1],
0,
0,
0,
0,
1,
0,
tr.elements[2][0],
tr.elements[2][1],
0,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
""");
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const CameraMatrix& p_matrix) { _FU
GLfloat matrix[16];
for (int i=0;i<4;i++) {
for (int j=0;j<4;j++) {
matrix[i*4+j]=p_matrix.matrix[i][j];
}
}
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}; """);
fd.write("\n\n#undef _FU\n\n\n");
fd.write("\tvirtual void init() {\n\n");
if (len(conditionals)):
fd.write("\t\tstatic const char* _conditional_strings[]={\n")
if (len(conditionals)):
for x in conditionals:
fd.write("\t\t\t\"#define "+x+"\\n\",\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic const char **_conditional_strings=NULL;\n")
if (len(uniforms)):
fd.write("\t\tstatic const char* _uniform_strings[]={\n")
if (len(uniforms)):
for x in uniforms:
fd.write("\t\t\t\""+x+"\",\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic const char **_uniform_strings=NULL;\n")
if (len(attributes)):
fd.write("\t\tstatic AttributePair _attribute_pairs[]={\n")
for x in attributes:
fd.write("\t\t\t{\""+x[0]+"\","+x[1]+"},\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic AttributePair *_attribute_pairs=NULL;\n")
if (len(fbos)):
fd.write("\t\tstatic FBOPair _fbo_pairs[]={\n")
for x in fbos:
fd.write("\t\t\t{\""+x[0]+"\","+x[1]+"},\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic FBOPair *_fbo_pairs=NULL;\n")
if (len(ubos)):
fd.write("\t\tstatic UBOPair _ubo_pairs[]={\n")
for x in ubos:
fd.write("\t\t\t{\""+x[0]+"\","+x[1]+"},\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic UBOPair *_ubo_pairs=NULL;\n")
if (len(texunits)):
fd.write("\t\tstatic TexUnitPair _texunit_pairs[]={\n")
for x in texunits:
fd.write("\t\t\t{\""+x[0]+"\","+x[1]+"},\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic TexUnitPair *_texunit_pairs=NULL;\n")
fd.write("\t\tstatic const char* _vertex_code=\"\\\n")
for x in vertex_lines:
fd.write("\t\t\t"+x+"\n");
fd.write("\t\t\";\n\n");
fd.write("\t\tstatic const int _vertex_code_start="+str(vertex_offset)+";\n")
fd.write("\t\tstatic const char* _fragment_code=\"\\\n")
for x in fragment_lines:
fd.write("\t\t\t"+x+"\n");
fd.write("\t\t\";\n\n");
fd.write("\t\tstatic const int _fragment_code_start="+str(fragment_offset)+";\n")
fd.write("\t\tsetup(_conditional_strings,"+str(len(conditionals))+",_uniform_strings,"+str(len(uniforms))+",_attribute_pairs,"+str(len(attributes))+",_fbo_pairs,"+str(len(fbos))+",_ubo_pairs,"+str(len(ubos))+",_texunit_pairs,"+str(len(texunits))+",_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n")
fd.write("\t};\n\n")
fd.write("};\n\n");
fd.write("#endif\n\n");
fd.close();
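# Illustrative input (a hypothetical .glsl file) showing the annotations the
# parser above understands: [vertex]/[fragment] section markers, #ifdef
# conditionals, "uniform ... //texunit:N" and "uniform ... //ubo:N" bindings,
# "attribute/in ... //attrib:N" vertex inputs, and "out ... //drawbuffer:N"
# fragment outputs, e.g.
#
#   [vertex]
#   uniform highp mat4 projection_matrix;
#   attribute highp vec4 vertex_attrib; //attrib:0
#
#   [fragment]
#   uniform sampler2D diffuse_texture; //texunit:0
#   out vec4 frag_color; //drawbuffer:0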
def build_glsl_headers( target, source, env ):
for x in source:
build_glsl_header(str(x));
return 0
def build_hlsl_dx9_header( filename ):
fs = open(filename,"r")
line=fs.readline()
vertex_lines=[]
fragment_lines=[]
uniforms=[]
fragment_uniforms=[]
attributes=[]
fbos=[]
conditionals=[]
reading=""
line_offset=0
vertex_offset=0
fragment_offset=0
while(line):
if (line.find("[vertex]")!=-1):
reading="vertex"
line=fs.readline()
line_offset+=1
vertex_offset=line_offset
continue
if (line.find("[fragment]")!=-1):
reading="fragment"
line=fs.readline()
line_offset+=1
fragment_offset=line_offset
continue
if (line.find("#ifdef ")!=-1):
ifdefline = line.replace("#ifdef ","").strip()
if (not ifdefline in conditionals):
conditionals+=[ifdefline]
if (line.find("#elif defined(")!=-1):
ifdefline = line.replace("#elif defined(","").strip()
ifdefline = ifdefline.replace(")","").strip()
if (not ifdefline in conditionals):
conditionals+=[ifdefline]
if (line.find("uniform")!=-1):
uline = line.replace("uniform","");
uline = uline.replace(";","");
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[ x.rfind(" ")+1: ]
if (x.find("[")!=-1):
                    #uniform array
x = x[ :x.find("[") ]
if (not x in uniforms):
uniforms+=[x]
fragment_uniforms+=[reading=="fragment"]
line=line.replace("\r","")
line=line.replace("\n","")
line=line.replace("\\","\\\\")
line=line.replace("\"","\\\"")
line=line+"\\n\\"
if (reading=="vertex"):
vertex_lines+=[line]
if (reading=="fragment"):
fragment_lines+=[line]
line=fs.readline()
line_offset+=1
fs.close();
out_file = filename+".h"
fd = open(out_file,"w")
fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n");
out_file_base = out_file
out_file_base = out_file_base[ out_file_base.rfind("/")+1: ]
out_file_base = out_file_base[ out_file_base.rfind("\\")+1: ]
# print("out file "+out_file+" base " +out_file_base)
out_file_ifdef = out_file_base.replace(".","_").upper()
fd.write("#ifndef "+out_file_ifdef+"\n")
fd.write("#define "+out_file_ifdef+"\n")
out_file_class = out_file_base.replace(".hlsl.h","").title().replace("_","").replace(".","")+"ShaderDX9";
fd.write("\n\n");
fd.write("#include \"drivers/directx9/shader_dx9.h\"\n\n\n");
fd.write("class "+out_file_class+" : public ShaderDX9 {\n\n");
fd.write("\t virtual String get_shader_name() const { return \""+out_file_class+"\"; }\n");
fd.write("public:\n\n");
if (len(conditionals)):
fd.write("\tenum Conditionals {\n");
for x in conditionals:
fd.write("\t\t"+x+",\n");
fd.write("\t};\n\n");
if (len(uniforms)):
fd.write("\tenum Uniforms {\n");
for x in uniforms:
fd.write("\t\t"+x.upper()+",\n");
fd.write("\t};\n\n");
if (len(conditionals)):
fd.write("\t_FORCE_INLINE_ void set_conditional(Conditionals p_conditional,bool p_enable) { _set_conditional(p_conditional,p_enable); }\n\n");
fd.write("\t#define _FU if (!_uniform_valid(p_uniform)) return; ERR_FAIL_COND( get_active()!=this );\n\n ");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, bool p_value) { _FU set_uniformb(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_value) { _FU set_uniformf(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, double p_value) { _FU set_uniformf(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint8_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int8_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint16_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int16_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint32_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int32_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint64_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int64_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, unsigned long p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, long p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Color& p_color) { _FU float col[4]={p_color.r,p_color.g,p_color.b,p_color.a}; set_uniformfv(p_uniform,col); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector2& p_vec2) { _FU float vec2[4]={p_vec2.x,p_vec2.y,0,0}; set_uniformfv(p_uniform,vec2); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector3& p_vec3) { _FU float vec3[4]={p_vec3.x,p_vec3.y,p_vec3.z,0}; set_uniformfv(p_uniform,vec3); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b) { _FU float vec2[4]={p_a,p_b,0,0}; set_uniformfv(p_uniform,vec2); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c) { _FU float vec3[4]={p_a,p_b,p_c,0}; set_uniformfv(p_uniform,vec3); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c, float p_d) { _FU float vec4[4]={p_a,p_b,p_c,p_d}; set_uniformfv(p_uniform,vec4); }\n\n");
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform& p_transform) { _FU
const Transform &tr = p_transform;
float matrix[16]={ /* build a 4x4 matrix */
tr.basis.elements[0][0],
tr.basis.elements[0][1],
tr.basis.elements[0][2],
tr.origin.x,
tr.basis.elements[1][0],
tr.basis.elements[1][1],
tr.basis.elements[1][2],
tr.origin.y,
tr.basis.elements[2][0],
tr.basis.elements[2][1],
tr.basis.elements[2][2],
tr.origin.z,
0,
0,
0,
1
};
set_uniformfv(p_uniform,&matrix[0],4);
}
""");
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const CameraMatrix& p_matrix) { _FU
float matrix[16];
for (int i=0;i<4;i++) {
for (int j=0;j<4;j++) {
matrix[i*4+j]=p_matrix.matrix[j][i];
}
}
set_uniformfv(p_uniform,&matrix[0],4);
}; """);
fd.write("\n\n#undef _FU\n\n\n");
fd.write("\tvirtual void init(IDirect3DDevice9 *p_device,ShaderSupport p_version) {\n\n");
if (len(conditionals)):
fd.write("\t\tstatic const char* _conditional_strings[]={\n")
if (len(conditionals)):
for x in conditionals:
fd.write("\t\t\t\""+x+"\",\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic const char **_conditional_strings=NULL;\n")
if (len(uniforms)):
fd.write("\t\tstatic const char* _uniform_strings[]={\n")
if (len(uniforms)):
for x in uniforms:
fd.write("\t\t\t\""+x+"\",\n");
fd.write("\t\t};\n\n");
fd.write("\t\tstatic const bool _fragment_uniforms[]={\n")
if (len(uniforms)):
for x in fragment_uniforms:
if (x):
fd.write("\t\t\ttrue,\n");
else:
fd.write("\t\t\tfalse,\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic const char **_uniform_strings=NULL;\n")
fd.write("\t\tstatic const bool *_fragment_uniforms=NULL;\n")
fd.write("\t\tstatic const char* _vertex_code=\"\\\n")
for x in vertex_lines:
fd.write("\t\t\t"+x+"\n");
fd.write("\t\t\";\n\n");
fd.write("\t\tstatic const int _vertex_code_start="+str(vertex_offset)+";\n")
fd.write("\t\tstatic const char* _fragment_code=\"\\\n")
for x in fragment_lines:
fd.write("\t\t\t"+x+"\n");
fd.write("\t\t\";\n\n");
fd.write("\t\tstatic const int _fragment_code_start="+str(fragment_offset)+";\n")
fd.write("\t\tsetup(p_device,p_version,_conditional_strings,"+str(len(conditionals))+",_uniform_strings,"+str(len(uniforms))+",_fragment_uniforms,_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n")
fd.write("\t};\n\n")
fd.write("};\n\n");
fd.write("#endif\n\n");
fd.close();
def build_hlsl_dx9_headers( target, source, env ):
for x in source:
build_hlsl_dx9_header(str(x));
return 0
class LegacyGLHeaderStruct:
def __init__(self):
self.vertex_lines=[]
self.fragment_lines=[]
self.uniforms=[]
self.attributes=[]
self.fbos=[]
self.conditionals=[]
self.enums={}
self.texunits=[]
self.texunit_names=[]
self.ubos=[]
self.ubo_names=[]
self.vertex_included_files=[]
self.fragment_included_files=[]
self.reading=""
self.line_offset=0
self.vertex_offset=0
self.fragment_offset=0
def include_file_in_legacygl_header( filename, header_data, depth ):
fs = open(filename,"r")
line=fs.readline()
while(line):
if (line.find("[vertex]")!=-1):
header_data.reading="vertex"
line=fs.readline()
header_data.line_offset+=1
header_data.vertex_offset=header_data.line_offset
continue
if (line.find("[fragment]")!=-1):
header_data.reading="fragment"
line=fs.readline()
header_data.line_offset+=1
header_data.fragment_offset=header_data.line_offset
continue
while(line.find("#include ")!=-1):
includeline = line.replace("#include ","").strip()[1:-1]
import os.path
included_file = os.path.relpath(os.path.dirname(filename) + "/" + includeline)
if (not included_file in header_data.vertex_included_files and header_data.reading=="vertex"):
header_data.vertex_included_files+=[included_file]
if(include_file_in_legacygl_header( included_file, header_data, depth + 1 ) == None):
print "Error in file '" + filename + "': #include " + includeline + "could not be found!"
elif (not included_file in header_data.fragment_included_files and header_data.reading=="fragment"):
header_data.fragment_included_files+=[included_file]
if(include_file_in_legacygl_header( included_file, header_data, depth + 1 ) == None):
print "Error in file '" + filename + "': #include " + includeline + "could not be found!"
line=fs.readline()
if (line.find("#ifdef ")!=-1 or line.find("#elif defined(")!=-1):
if (line.find("#ifdef ")!=-1):
ifdefline = line.replace("#ifdef ","").strip()
else:
ifdefline = line.replace("#elif defined(","").strip()
ifdefline = ifdefline.replace(")","").strip()
if (line.find("_EN_")!=-1):
enumbase = ifdefline[:ifdefline.find("_EN_")];
ifdefline = ifdefline.replace("_EN_","_")
line = line.replace("_EN_","_")
# print(enumbase+":"+ifdefline);
if (enumbase not in header_data.enums):
header_data.enums[enumbase]=[]
if (ifdefline not in header_data.enums[enumbase]):
header_data.enums[enumbase].append(ifdefline);
elif (not ifdefline in header_data.conditionals):
header_data.conditionals+=[ifdefline]
if (line.find("uniform")!=-1 and line.lower().find("texunit:")!=-1):
#texture unit
texunitstr = line[line.find(":")+1:].strip()
if (texunitstr=="auto"):
texunit="-1"
else:
texunit = str(int(texunitstr ))
uline=line[:line.lower().find("//")]
uline = uline.replace("uniform","");
uline = uline.replace("highp","");
uline = uline.replace(";","");
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[ x.rfind(" ")+1: ]
if (x.find("[")!=-1):
#uniform array
x = x[ :x.find("[") ]
if (not x in header_data.texunit_names):
header_data.texunits+=[(x,texunit)]
header_data.texunit_names+=[x]
elif (line.find("uniform")!=-1):
uline = line.replace("uniform","");
uline = uline.replace(";","");
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[ x.rfind(" ")+1: ]
if (x.find("[")!=-1):
#uniform array
x = x[ :x.find("[") ]
if (not x in header_data.uniforms):
header_data.uniforms+=[x]
if ((line.strip().find("in ")==0 or line.strip().find("attribute ")==0) and line.find("attrib:")!=-1):
uline = line.replace("in ","");
uline = uline.replace("attribute ","");
uline = uline.replace("highp ","");
uline = uline.replace(";","");
uline = uline[ uline.find(" "): ].strip()
if (uline.find("//")!=-1):
name,bind = uline.split("//")
if (bind.find("attrib:")!=-1):
name=name.strip()
bind=bind.replace("attrib:","").strip()
header_data.attributes+=[(name,bind)]
line=line.replace("\r","")
line=line.replace("\n","")
#line=line.replace("\\","\\\\")
#line=line.replace("\"","\\\"")
#line=line+"\\n\\"
if (header_data.reading=="vertex"):
header_data.vertex_lines+=[line]
if (header_data.reading=="fragment"):
header_data.fragment_lines+=[line]
line=fs.readline()
header_data.line_offset+=1
fs.close();
return header_data
def build_legacygl_header( filename, include, class_suffix, output_attribs ):
header_data = LegacyGLHeaderStruct()
include_file_in_legacygl_header( filename, header_data, 0 )
out_file = filename+".h"
fd = open(out_file,"w")
enum_constants=[]
fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n");
out_file_base = out_file
out_file_base = out_file_base[ out_file_base.rfind("/")+1: ]
out_file_base = out_file_base[ out_file_base.rfind("\\")+1: ]
# print("out file "+out_file+" base " +out_file_base)
out_file_ifdef = out_file_base.replace(".","_").upper()
fd.write("#ifndef "+out_file_ifdef+class_suffix+"_120\n")
fd.write("#define "+out_file_ifdef+class_suffix+"_120\n")
out_file_class = out_file_base.replace(".glsl.h","").title().replace("_","").replace(".","")+"Shader"+class_suffix;
fd.write("\n\n");
fd.write("#include \"" + include + "\"\n\n\n");
fd.write("class "+out_file_class+" : public Shader"+class_suffix+" {\n\n");
fd.write("\t virtual String get_shader_name() const { return \""+out_file_class+"\"; }\n");
fd.write("public:\n\n");
if (len(header_data.conditionals)):
fd.write("\tenum Conditionals {\n");
for x in header_data.conditionals:
fd.write("\t\t"+x.upper()+",\n");
fd.write("\t};\n\n");
if (len(header_data.uniforms)):
fd.write("\tenum Uniforms {\n");
for x in header_data.uniforms:
fd.write("\t\t"+x.upper()+",\n");
fd.write("\t};\n\n");
fd.write("\t_FORCE_INLINE_ int get_uniform(Uniforms p_uniform) const { return _get_uniform(p_uniform); }\n\n");
if (len(header_data.conditionals)):
fd.write("\t_FORCE_INLINE_ void set_conditional(Conditionals p_conditional,bool p_enable) { _set_conditional(p_conditional,p_enable); }\n\n");
fd.write("\t#define _FU if (get_uniform(p_uniform)<0) return; ERR_FAIL_COND( get_active()!=this );\n\n ");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, double p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, unsigned long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Color& p_color) { _FU GLfloat col[4]={p_color.r,p_color.g,p_color.b,p_color.a}; glUniform4fv(get_uniform(p_uniform),1,col); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector2& p_vec2) { _FU GLfloat vec2[2]={p_vec2.x,p_vec2.y}; glUniform2fv(get_uniform(p_uniform),1,vec2); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector3& p_vec3) { _FU GLfloat vec3[3]={p_vec3.x,p_vec3.y,p_vec3.z}; glUniform3fv(get_uniform(p_uniform),1,vec3); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b) { _FU glUniform2f(get_uniform(p_uniform),p_a,p_b); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c) { _FU glUniform3f(get_uniform(p_uniform),p_a,p_b,p_c); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c, float p_d) { _FU glUniform4f(get_uniform(p_uniform),p_a,p_b,p_c,p_d); }\n\n");
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform& p_transform) { _FU
const Transform &tr = p_transform;
GLfloat matrix[16]={ /* build a 4x4 matrix */
tr.basis.elements[0][0],
tr.basis.elements[1][0],
tr.basis.elements[2][0],
0,
tr.basis.elements[0][1],
tr.basis.elements[1][1],
tr.basis.elements[2][1],
0,
tr.basis.elements[0][2],
tr.basis.elements[1][2],
tr.basis.elements[2][2],
0,
tr.origin.x,
tr.origin.y,
tr.origin.z,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
""");
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Matrix32& p_transform) { _FU
const Matrix32 &tr = p_transform;
GLfloat matrix[16]={ /* build a 4x4 matrix */
tr.elements[0][0],
tr.elements[0][1],
0,
0,
tr.elements[1][0],
tr.elements[1][1],
0,
0,
0,
0,
1,
0,
tr.elements[2][0],
tr.elements[2][1],
0,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
""");
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const CameraMatrix& p_matrix) { _FU
GLfloat matrix[16];
for (int i=0;i<4;i++) {
for (int j=0;j<4;j++) {
matrix[i*4+j]=p_matrix.matrix[i][j];
}
}
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}; """);
fd.write("\n\n#undef _FU\n\n\n");
fd.write("\tvirtual void init() {\n\n");
enum_value_count=0;
if (len(header_data.enums)):
fd.write("\t\t//Written using math, given nonstandarity of 64 bits integer constants..\n");
fd.write("\t\tstatic const Enum _enums[]={\n")
bitofs=len(header_data.conditionals)
enum_vals=[]
for xv in header_data.enums:
x=header_data.enums[xv]
bits=1
amt = len(x);
# print(x)
while(2**bits < amt):
bits+=1
# print("amount: "+str(amt)+" bits "+str(bits));
strs="{"
for i in range(amt):
strs+="\"#define "+x[i]+"\\n\","
v={}
v["set_mask"]="uint64_t("+str(i)+")<<"+str(bitofs)
v["clear_mask"]="((uint64_t(1)<<40)-1) ^ (((uint64_t(1)<<"+str(bits)+") - 1)<<"+str(bitofs)+")"
enum_vals.append(v)
enum_constants.append(x[i])
strs+="NULL}"
fd.write("\t\t\t{(uint64_t(1<<"+str(bits)+")-1)<<"+str(bitofs)+","+str(bitofs)+","+strs+"},\n");
bitofs+=bits
fd.write("\t\t};\n\n");
fd.write("\t\tstatic const EnumValue _enum_values[]={\n")
enum_value_count=len(enum_vals);
for x in enum_vals:
fd.write("\t\t\t{"+x["set_mask"]+","+x["clear_mask"]+"},\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic const Enum *_enums=NULL;\n")
fd.write("\t\tstatic const EnumValue *_enum_values=NULL;\n")
if (len(header_data.conditionals)):
fd.write("\t\tstatic const char* _conditional_strings[]={\n")
if (len(header_data.conditionals)):
for x in header_data.conditionals:
fd.write("\t\t\t\"#define "+x+"\\n\",\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic const char **_conditional_strings=NULL;\n")
if (len(header_data.uniforms)):
fd.write("\t\tstatic const char* _uniform_strings[]={\n")
if (len(header_data.uniforms)):
for x in header_data.uniforms:
fd.write("\t\t\t\""+x+"\",\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic const char **_uniform_strings=NULL;\n")
if output_attribs:
if (len(header_data.attributes)):
fd.write("\t\tstatic AttributePair _attribute_pairs[]={\n")
for x in header_data.attributes:
fd.write("\t\t\t{\""+x[0]+"\","+x[1]+"},\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic AttributePair *_attribute_pairs=NULL;\n")
if (len(header_data.texunits)):
fd.write("\t\tstatic TexUnitPair _texunit_pairs[]={\n")
for x in header_data.texunits:
fd.write("\t\t\t{\""+x[0]+"\","+x[1]+"},\n");
fd.write("\t\t};\n\n");
else:
fd.write("\t\tstatic TexUnitPair *_texunit_pairs=NULL;\n")
fd.write("\t\tstatic const char _vertex_code[]={\n")
for x in header_data.vertex_lines:
for i in range(len(x)):
fd.write(str(ord(x[i]))+",");
fd.write(str(ord('\n'))+",");
fd.write("\t\t0};\n\n");
fd.write("\t\tstatic const int _vertex_code_start="+str(header_data.vertex_offset)+";\n")
fd.write("\t\tstatic const char _fragment_code[]={\n")
for x in header_data.fragment_lines:
for i in range(len(x)):
fd.write(str(ord(x[i]))+",");
fd.write(str(ord('\n'))+",");
fd.write("\t\t0};\n\n");
fd.write("\t\tstatic const int _fragment_code_start="+str(header_data.fragment_offset)+";\n")
if output_attribs:
fd.write("\t\tsetup(_conditional_strings,"+str(len(header_data.conditionals))+",_uniform_strings,"+str(len(header_data.uniforms))+",_attribute_pairs,"+str(len(header_data.attributes))+", _texunit_pairs,"+str(len(header_data.texunits))+",_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n")
else:
fd.write("\t\tsetup(_conditional_strings,"+str(len(header_data.conditionals))+",_uniform_strings,"+str(len(header_data.uniforms))+",_texunit_pairs,"+str(len(header_data.texunits))+",_enums,"+str(len(header_data.enums))+",_enum_values,"+str(enum_value_count)+",_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n")
fd.write("\t};\n\n")
if (len(enum_constants)):
fd.write("\tenum EnumConditionals {\n")
for x in enum_constants:
fd.write("\t\t"+x.upper()+",\n");
fd.write("\t};\n\n");
fd.write("\tvoid set_enum_conditional(EnumConditionals p_cond) { _set_enum_conditional(p_cond); }\n")
fd.write("};\n\n");
fd.write("#endif\n\n");
fd.close();
def build_legacygl_headers( target, source, env ):
for x in source:
build_legacygl_header(str(x), include = "drivers/legacygl/shader_lgl.h", class_suffix = "LGL", output_attribs = False);
return 0
def build_gles2_headers( target, source, env ):
for x in source:
build_legacygl_header(str(x), include="drivers/gles2/shader_gles2.h", class_suffix = "GLES2", output_attribs = True)
def update_version():
rev = "custom_build"
if (os.getenv("BUILD_REVISION")!=None):
rev=os.getenv("BUILD_REVISION")
print("Using custom revision: "+rev)
import version
f=open("core/version.h","wb")
f.write("#define VERSION_SHORT_NAME "+str(version.short_name)+"\n")
f.write("#define VERSION_NAME "+str(version.name)+"\n")
f.write("#define VERSION_MAJOR "+str(version.major)+"\n")
f.write("#define VERSION_MINOR "+str(version.minor)+"\n")
if (hasattr(version, 'patch')):
f.write("#define VERSION_PATCH "+str(version.patch)+"\n")
f.write("#define VERSION_REVISION "+str(rev)+"\n")
f.write("#define VERSION_STATUS "+str(version.status)+"\n")
import datetime
f.write("#define VERSION_YEAR "+str(datetime.datetime.now().year)+"\n")
def parse_cg_file(fname, uniforms, sizes, conditionals):
import re
fs = open(fname, "r")
line=fs.readline()
while line:
if re.match(r"^\s*uniform", line):
res = re.match(r"uniform ([\d\w]*) ([\d\w]*)", line)
type = res.group(1)
name = res.group(2)
uniforms.append(name);
if (type.find("texobj") != -1):
sizes.append(1);
else:
t = re.match(r"float(\d)x(\d)", type);
if t:
sizes.append(int(t.group(1)) * int(t.group(2)))
else:
t = re.match(r"float(\d)", type);
sizes.append(int(t.group(1)))
if line.find("[branch]") != -1:
conditionals.append(name);
line = fs.readline();
def build_cg_shader(sname):
vp_uniforms = []
vp_uniform_sizes = []
vp_conditionals = []
parse_cg_file("vp_"+sname+".cg", vp_uniforms, vp_uniform_sizes, vp_conditionals);
fp_uniforms = []
fp_uniform_sizes = []
fp_conditionals = []
parse_cg_file("fp_"+sname+".cg", fp_uniforms, fp_uniform_sizes, fp_conditionals);
fd = open("shader_"+sname+".cg.h", "w");
fd.write('\n#include "shader_cell.h"\n');
fd.write("\nclass Shader_" + sname + " : public ShaderCell {\n");
fd.write("\n\tstatic struct VertexUniforms[] = {\n");
offset = 0;
for i in range(0, len(vp_uniforms)):
fd.write('\t\t{ "%s", %d, %d },\n' % (vp_uniforms[i], offset, vp_uniform_sizes[i]))
offset = offset + vp_uniform_sizes[i];
fd.write("\t};\n\n");
fd.write("public:\n\n");
fd.write("\tenum {\n");
for i in range(0, len(vp_uniforms)):
fd.write('\t\tVP_%s,\n' % vp_uniforms[i].upper())
fd.write("\t};\n");
import glob
def detect_modules():
module_list=[]
includes_cpp=""
register_cpp=""
unregister_cpp=""
for x in glob.glob("modules/*"):
if (not os.path.isdir(x)):
continue
x=x.replace("modules/","") # rest of world
x=x.replace("modules\\","") # win32
module_list.append(x)
try:
with open("modules/"+x+"/register_types.h"):
includes_cpp+='#include "modules/'+x+'/register_types.h"\n'
register_cpp+='#ifdef MODULE_'+x.upper()+'_ENABLED\n'
register_cpp+='\tregister_'+x+'_types();\n'
register_cpp+='#endif\n'
unregister_cpp+='#ifdef MODULE_'+x.upper()+'_ENABLED\n'
unregister_cpp+='\tunregister_'+x+'_types();\n'
unregister_cpp+='#endif\n'
except IOError:
pass
modules_cpp="""
// modules.cpp - THIS FILE IS GENERATED, DO NOT EDIT!!!!!!!
#include "register_module_types.h"
"""+includes_cpp+"""
void register_module_types() {
"""+register_cpp+"""
}
void unregister_module_types() {
"""+unregister_cpp+"""
}
"""
f=open("modules/register_module_types.cpp","wb")
f.write(modules_cpp)
return module_list
def win32_spawn(sh, escape, cmd, args, env):
import subprocess
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
startupinfo = subprocess.STARTUPINFO()
#startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
for e in env:
if type(env[e]) != type(""):
env[e] = str(env[e])
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell = False, env = env)
data, err = proc.communicate()
rv = proc.wait()
if rv:
print "====="
print err
print "====="
return rv
"""
def win32_spawn(sh, escape, cmd, args, spawnenv):
import win32file
import win32event
import win32process
import win32security
for var in spawnenv:
spawnenv[var] = spawnenv[var].encode('ascii', 'replace')
sAttrs = win32security.SECURITY_ATTRIBUTES()
StartupInfo = win32process.STARTUPINFO()
newargs = ' '.join(map(escape, args[1:]))
cmdline = cmd + " " + newargs
# check for any special operating system commands
if cmd == 'del':
for arg in args[1:]:
win32file.DeleteFile(arg)
exit_code = 0
else:
# otherwise execute the command.
hProcess, hThread, dwPid, dwTid = win32process.CreateProcess(None, cmdline, None, None, 1, 0, spawnenv, None, StartupInfo)
win32event.WaitForSingleObject(hProcess, win32event.INFINITE)
exit_code = win32process.GetExitCodeProcess(hProcess)
win32file.CloseHandle(hProcess);
win32file.CloseHandle(hThread);
return exit_code
"""
def android_add_maven_repository(self,url):
self.android_maven_repos.append(url)
def android_add_dependency(self,depline):
self.android_dependencies.append(depline)
def android_add_java_dir(self,subpath):
base_path = self.Dir(".").abspath+"/modules/"+self.current_module+"/"+subpath
self.android_java_dirs.append(base_path)
def android_add_res_dir(self,subpath):
base_path = self.Dir(".").abspath+"/modules/"+self.current_module+"/"+subpath
self.android_res_dirs.append(base_path)
def android_add_aidl_dir(self,subpath):
base_path = self.Dir(".").abspath+"/modules/"+self.current_module+"/"+subpath
self.android_aidl_dirs.append(base_path)
def android_add_jni_dir(self,subpath):
base_path = self.Dir(".").abspath+"/modules/"+self.current_module+"/"+subpath
self.android_jni_dirs.append(base_path)
def android_add_to_manifest(self,file):
base_path = self.Dir(".").abspath+"/modules/"+self.current_module+"/"+file
f = open(base_path,"rb")
self.android_manifest_chunk+=f.read()
def android_add_to_permissions(self,file):
base_path = self.Dir(".").abspath+"/modules/"+self.current_module+"/"+file
f = open(base_path,"rb")
self.android_permission_chunk+=f.read()
def android_add_to_attributes(self,file):
base_path = self.Dir(".").abspath+"/modules/"+self.current_module+"/"+file
f = open(base_path,"rb")
self.android_appattributes_chunk+=f.read()
def disable_module(self):
self.disabled_modules.append(self.current_module)
def use_windows_spawn_fix(self, platform=None):
if (os.name!="nt"):
return #not needed, only for windows
self.split_drivers=True
import subprocess
def mySubProcess(cmdline,env):
prefix = ""
if(platform == 'javascript'):
prefix = "python.exe "
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(prefix + cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell = False, env = env)
data, err = proc.communicate()
rv = proc.wait()
if rv:
print "====="
print err
print "====="
return rv
def mySpawn(sh, escape, cmd, args, env):
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
rv=0
if len(cmdline) > 32000 and cmd.endswith("ar") :
cmdline = cmd + " " + args[1] + " " + args[2] + " "
for i in range(3,len(args)) :
rv = mySubProcess( cmdline + args[i], env )
if rv :
break
else:
rv = mySubProcess( cmdline, env )
return rv
self['SPAWN'] = mySpawn
def save_active_platforms(apnames,ap):
for x in ap:
pth = x+"/logo.png"
# print("open path: "+pth)
pngf=open(pth,"rb");
b=pngf.read(1);
str=" /* AUTOGENERATED FILE, DO NOT EDIT */ \n"
str+=" static const unsigned char _"+x[9:]+"_logo[]={"
while(len(b)==1):
str+=hex(ord(b))
b=pngf.read(1);
if (len(b)==1):
str+=","
str+="};\n"
wf = x+"/logo.h"
logow = open(wf,"wb")
logow.write(str)
def colored(sys,env):
#If the output is not a terminal, do nothing
if not sys.stdout.isatty():
return
colors = {}
colors['cyan'] = '\033[96m'
colors['purple'] = '\033[95m'
colors['blue'] = '\033[94m'
colors['green'] = '\033[92m'
colors['yellow'] = '\033[93m'
colors['red'] = '\033[91m'
colors['end'] = '\033[0m'
compile_source_message = '%sCompiling %s==> %s$SOURCE%s' % (colors['blue'], colors['purple'], colors['yellow'], colors['end'])
java_compile_source_message = '%sCompiling %s==> %s$SOURCE%s' % (colors['blue'], colors['purple'], colors['yellow'], colors['end'])
compile_shared_source_message = '%sCompiling shared %s==> %s$SOURCE%s' % (colors['blue'], colors['purple'], colors['yellow'], colors['end'])
link_program_message = '%sLinking Program %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end'])
link_library_message = '%sLinking Static Library %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end'])
ranlib_library_message = '%sRanlib Library %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end'])
link_shared_library_message = '%sLinking Shared Library %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end'])
java_library_message = '%sCreating Java Archive %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end'])
env.Append( CXXCOMSTR=[compile_source_message] )
env.Append( CCCOMSTR=[compile_source_message] )
env.Append( SHCCCOMSTR=[compile_shared_source_message] )
env.Append( SHCXXCOMSTR=[compile_shared_source_message] )
env.Append( ARCOMSTR=[link_library_message] )
env.Append( RANLIBCOMSTR=[ranlib_library_message] )
env.Append( SHLINKCOMSTR=[link_shared_library_message] )
env.Append( LINKCOMSTR=[link_program_message] )
env.Append( JARCOMSTR=[java_library_message] )
env.Append( JAVACCOMSTR=[java_compile_source_message] )
|
|
"""
Vaex is a library for dealing with larger than memory DataFrames (out of core).
The most important class (datastructure) in vaex is the :class:`.DataFrame`. A DataFrame is obtained by either opening
the example dataset:
>>> import vaex
>>> df = vaex.example()
Or using :func:`open` to open a file.
>>> df1 = vaex.open("somedata.hdf5")
>>> df2 = vaex.open("somedata.fits")
>>> df2 = vaex.open("somedata.arrow")
>>> df4 = vaex.open("somedata.csv")
Or connecting to a remote server:
>>> df_remote = vaex.open("http://try.vaex.io/nyc_taxi_2015")
A few strong features of vaex are:
* Performance: works with huge tabular data, process over a billion (> 10\\ :sup:`9`\\ ) rows/second.
* Expression system / Virtual columns: compute on the fly, without wasting ram.
* Memory efficient: no memory copies when doing filtering/selections/subsets.
* Visualization: directly supported, a one-liner is often enough.
* User friendly API: you will only need to deal with a DataFrame object, and tab completion + docstrings will help you out (`ds.mean<tab>`); it feels very similar to Pandas.
* Very fast statistics on N dimensional grids such as histograms, running mean, heatmaps.
Follow the tutorial at https://docs.vaex.io/en/latest/tutorial.html to learn how to use vaex.
""" # -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import re
import six
import vaex.dataframe
import vaex.dataset
from vaex.functions import register_function
from . import stat
# import vaex.file
# import vaex.export
from .delayed import delayed
from .groupby import *
from . import agg
import vaex.datasets
# import vaex.plot
# from vaex.dataframe import DataFrame
# del ServerRest, DataFrame
import vaex.settings
import logging
import pkg_resources
import os
from functools import reduce
try:
from . import version
except:
import sys
print("version file not found, please run git/hooks/post-commit or git/hooks/post-checkout and/or install them as hooks (see git/README)", file=sys.stderr)
raise
__version__ = version.get_versions()
def app(*args, **kwargs):
"""Create a vaex app, the QApplication mainloop must be started.
In ipython notebook/jupyter do the following:
>>> import vaex.ui.main # this causes the qt api level to be set properly
>>> import vaex
Next cell:
>>> %gui qt
Next cell:
>>> app = vaex.app()
From now on, you can run the app along with jupyter
"""
import vaex.ui.main
return vaex.ui.main.VaexApp()
def _convert_name(filenames, shuffle=False, suffix=None):
'''Convert a filename (or list of) to a filename with .hdf5 and optionally a -shuffle or other suffix'''
if not isinstance(filenames, (list, tuple)):
filenames = [filenames]
base = filenames[0]
if shuffle:
base += '-shuffle'
if suffix:
base += suffix
if len(filenames) > 1:
return base + "_and_{}_more.hdf5".format(len(filenames)-1)
else:
return base + ".hdf5"
def open(path, convert=False, shuffle=False, copy_index=False, *args, **kwargs):
"""Open a DataFrame from file given by path.
Example:
>>> df = vaex.open('sometable.hdf5')
>>> df = vaex.open('somedata*.csv', convert='bigdata.hdf5')
:param str or list path: local or absolute path to file, or glob string, or list of paths
:param convert: convert files to an hdf5 file for optimization, can also be a path
:param bool shuffle: shuffle converted DataFrame or not
:param args: extra arguments for file readers that need it
:param kwargs: extra keyword arguments
:param bool copy_index: copy index when source is read via pandas
:return: return a DataFrame on success, otherwise None
:rtype: DataFrame
S3 support:
Vaex supports streaming of hdf5 files from Amazon AWS object storage S3.
Files are by default cached in $HOME/.vaex/file-cache/s3 such that successive access
is as fast as native disk access. The following url parameters control S3 options:
* anon: Use anonymous access or not (false by default). (Allowed values are: true,True,1,false,False,0)
* use_cache: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0)
* profile_name and other arguments are passed to :py:class:`s3fs.core.S3FileSystem`
All arguments can also be passed as kwargs, but then arguments such as `anon` can only be a boolean, not a string.
Examples:
>>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5?anon=true')
>>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5', anon=True) # Note that anon is a boolean, not the string 'true'
>>> df = vaex.open('s3://mybucket/path/to/file.hdf5?profile_name=myprofile')
GCS support:
Vaex supports streaming of hdf5 files from Google Cloud Storage.
Files are by default cached in $HOME/.vaex/file-cache/gs such that successive access
is as fast as native disk access. The following url parameters control GCS options:
* token: Authentication method for GCP. Use 'anon' for anonymous access. See https://gcsfs.readthedocs.io/en/latest/index.html#credentials for more details.
* use_cache: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0).
* project and other arguments are passed to :py:class:`gcsfs.core.GCSFileSystem`
Examples:
>>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5?token=anon')
>>> df = vaex.open('gs://vaex-data/testing/xys.hdf5?token=anon&cache=False')
"""
import vaex
try:
if path in aliases:
path = aliases[path]
if path.startswith("http://") or path.startswith("ws://") or \
path.startswith("vaex+http://") or path.startswith("vaex+ws://"): # TODO: think about https and wss
server, name = path.rsplit("/", 1)
url = urlparse(path)
if '?' in name:
name = name[:name.index('?')]
extra_args = {key: values[0] for key, values in parse_qs(url.query).items()}
if 'token' in extra_args:
kwargs['token'] = extra_args['token']
if 'token_trusted' in extra_args:
kwargs['token_trusted'] = extra_args['token_trusted']
client = vaex.connect(server, **kwargs)
return client[name]
if path.startswith("cluster"):
import vaex.enterprise.distributed
return vaex.enterprise.distributed.open(path, *args, **kwargs)
else:
import vaex.file
import glob
if isinstance(path, str):
paths = [path]
else:
paths = path
filenames = []
for path in paths:
# TODO: can we do glob with s3?
if path.startswith('s3://'):
filenames.append(path)
elif path.startswith('gs://'):
filenames.append(path)
else:
# sort to get predictable behaviour (useful for testing)
filenames.extend(list(sorted(glob.glob(path))))
ds = None
if len(filenames) == 0:
raise IOError('Could not open file: {}, it does not exist'.format(path))
filename_hdf5 = _convert_name(filenames, shuffle=shuffle)
filename_hdf5_noshuffle = _convert_name(filenames, shuffle=False)
if len(filenames) == 1:
path = filenames[0]
naked_path = path
if '?' in naked_path:
naked_path = naked_path[:naked_path.index('?')]
ext = os.path.splitext(naked_path)[1]
if os.path.exists(filename_hdf5) and convert: # also check mtime?
ds = vaex.file.open(filename_hdf5)
else:
if ext == '.csv' or naked_path.endswith(".csv.bz2"): # special support for csv.. should probably approach it a different way
csv_convert = filename_hdf5 if convert else False
ds = from_csv(path, copy_index=copy_index, convert=csv_convert, **kwargs)
else:
ds = vaex.file.open(path, *args, **kwargs)
if convert and ds:
ds.export_hdf5(filename_hdf5, shuffle=shuffle)
ds = vaex.file.open(filename_hdf5) # arguments were meant for pandas?
if ds is None:
if os.path.exists(path):
raise IOError('Could not open file: {}, did you install vaex-hdf5? Is the format supported?'.format(path))
elif len(filenames) > 1:
if convert not in [True, False]:
filename_hdf5 = convert
else:
filename_hdf5 = _convert_name(filenames, shuffle=shuffle)
if os.path.exists(filename_hdf5) and convert: # also check mtime
ds = open(filename_hdf5)
else:
# with ProcessPoolExecutor() as executor:
# executor.submit(read_csv_and_convert, filenames, shuffle=shuffle, **kwargs)
dfs = []
for filename in filenames:
dfs.append(open(filename, convert=bool(convert), shuffle=shuffle, **kwargs))
ds = concat(dfs)
if convert:
ds.export_hdf5(filename_hdf5, shuffle=shuffle)
ds = vaex.file.open(filename_hdf5)
if ds is None:
raise IOError('Unknown error opening: {}'.format(path))
return ds
except:
logging.getLogger("vaex").error("error opening %r" % path)
raise
def open_many(filenames):
"""Open a list of filenames, and return a DataFrame with all DataFrames concatenated.
:param list[str] filenames: list of filenames/paths
:rtype: DataFrame
"""
dfs = []
for filename in filenames:
filename = filename.strip()
if filename and filename[0] != "#":
dfs.append(open(filename))
return concat(dfs)
def from_samp(username=None, password=None):
"""Connect to a SAMP Hub and wait for a single table load event, disconnect, download the table and return the DataFrame.
Useful if you want to send a single table from say TOPCAT to vaex in a python console or notebook.
"""
print("Waiting for SAMP message...")
import vaex.samp
t = vaex.samp.single_table(username=username, password=password)
return from_astropy_table(t.to_table())
def from_astropy_table(table):
"""Create a vaex DataFrame from an Astropy Table."""
import vaex.file.other
ds = vaex.file.other.DatasetAstropyTable(table=table)
return vaex.dataframe.DataFrameLocal(ds)
def from_dict(data):
"""Create an in memory dataset from a dict with column names as keys and list/numpy-arrays as values
Example
>>> data = {'A':[1,2,3],'B':['a','b','c']}
>>> vaex.from_dict(data)
# A B
0 1 'a'
1 2 'b'
2 3 'c'
:param data: A dict of {column:[value, value,...]}
:rtype: DataFrame
"""
return vaex.from_arrays(**data)
def from_items(*items):
"""Create an in memory DataFrame from numpy arrays, in contrast to from_arrays this keeps the order of columns intact (for Python < 3.6).
Example
>>> import vaex, numpy as np
>>> x = np.arange(5)
>>> y = x ** 2
>>> vaex.from_items(('x', x), ('y', y))
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
:param items: list of [(name, numpy array), ...]
:rtype: DataFrame
"""
return from_dict(dict(items))
def from_arrays(**arrays):
"""Create an in memory DataFrame from numpy arrays.
Example
>>> import vaex, numpy as np
>>> x = np.arange(5)
>>> y = x ** 2
>>> vaex.from_arrays(x=x, y=y)
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
>>> some_dict = {'x': x, 'y': y}
>>> vaex.from_arrays(**some_dict) # in case you have your columns in a dict
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
:param arrays: keyword arguments with arrays
:rtype: DataFrame
"""
import numpy as np
import six
dataset = vaex.dataset.DatasetArrays(arrays)
return vaex.dataframe.DataFrameLocal(dataset)
def from_arrow_table(table, as_numpy=True):
"""Creates a vaex DataFrame from an arrow Table.
:param as_numpy: Will lazily cast columns to a NumPy ndarray.
:rtype: DataFrame
"""
from vaex.arrow.dataset import from_table
return from_table(table=table, as_numpy=as_numpy)
def from_scalars(**kwargs):
"""Similar to from_arrays, but convenient for a DataFrame of length 1.
Example:
>>> import vaex
>>> df = vaex.from_scalars(x=1, y=2)
:rtype: DataFrame
"""
import numpy as np
return from_arrays(**{k: np.array([v]) for k, v in kwargs.items()})
def from_pandas(df, name="pandas", copy_index=False, index_name="index"):
"""Create an in memory DataFrame from a pandas DataFrame.
:param pandas.DataFrame df: Pandas DataFrame
:param str name: unique name for the DataFrame
>>> import vaex, pandas as pd
>>> df_pandas = pd.read_csv('test.csv')
>>> df = vaex.from_pandas(df_pandas)
:rtype: DataFrame
"""
import six
import pandas as pd
import numpy as np
import pyarrow as pa
columns = {}
def add(name, column):
values = column.values
# the first test is to support (partially) pandas 0.23
if hasattr(pd.core.arrays, 'integer') and isinstance(values, pd.core.arrays.integer.IntegerArray):
values = np.ma.array(values._data, mask=values._mask)
elif hasattr(pd.core.arrays, 'StringArray') and isinstance(values, pd.core.arrays.StringArray):
values = pa.array(values)
try:
columns[name] = vaex.dataset.to_supported_array(values)
except Exception as e:
print("could not convert column %s, error: %r, will try to convert it to string" % (name, e))
try:
values = values.astype("S")
columns[name] = vaex.dataset.to_supported_array(values)
except Exception as e:
print("Giving up column %s, error: %r" % (name, e))
for name in df.columns:
add(name, df[name])
if copy_index:
add(index_name, df.index)
return from_dict(columns)
def from_ascii(path, seperator=None, names=True, skip_lines=0, skip_after=0, **kwargs):
"""
Create an in memory DataFrame from an ascii file (whitespace separated by default).
>>> ds = vx.from_ascii("table.asc")
>>> ds = vx.from_ascii("table.csv", seperator=",", names=["x", "y", "z"])
:param path: file path
:param seperator: value separator, by default whitespace; use "," for comma separated values.
:param names: If True, the first line is used for the column names, otherwise provide a list of strings with names
:param skip_lines: skip lines at the start of the file
:param skip_after: skip lines at the end of the file
:param kwargs:
:rtype: DataFrame
"""
import vaex.ext.readcol as rc
ds = vaex.dataframe.DataFrameLocal()
if names not in [True, False]:
namelist = names
names = False
else:
namelist = None
data = rc.readcol(path, fsep=seperator, asdict=namelist is None, names=names, skipline=skip_lines, skipafter=skip_after, **kwargs)
if namelist:
for name, array in zip(namelist, data.T):
ds.add_column(name, array)
else:
for name, array in data.items():
ds.add_column(name, array)
return ds
def from_json(path_or_buffer, orient=None, precise_float=False, lines=False, copy_index=False, **kwargs):
""" A method to read a JSON file using pandas, and convert to a DataFrame directly.
:param str path_or_buffer: a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be ``file://localhost/path/to/table.json``
:param str orient: Indication of expected JSON string format. Allowed values are
``split``, ``records``, ``index``, ``columns``, and ``values``.
:param bool precise_float: Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but less precise builtin functionality
:param bool lines: Read the file as a json object per line.
:rtype: DataFrame
"""
# Check for unsupported kwargs
if kwargs.get('typ') == 'series':
raise ValueError('`typ` must be set to `"frame"`.')
if kwargs.get('numpy') == True:
raise ValueError('`numpy` must be set to `False`.')
if kwargs.get('chunksize') is not None:
raise ValueError('`chunksize` must be `None`.')
import pandas as pd
return from_pandas(pd.read_json(path_or_buffer, orient=orient, precise_float=precise_float, lines=lines, **kwargs),
copy_index=copy_index)
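# Minimal usage sketch for from_json (editor's addition; 'records.json' is a
# hypothetical newline-delimited JSON file):
#
#   >>> import vaex
#   >>> df = vaex.from_json('records.json', lines=True)
#   >>> df.head()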
def from_csv(filename_or_buffer, copy_index=False, chunk_size=None, convert=False, **kwargs):
"""
Read a CSV file as a DataFrame, and optionally convert to an hdf5 file.
:param str or file filename_or_buffer: CSV file path or file-like
:param bool copy_index: copy index when source is read via Pandas
:param int chunk_size: if the CSV file is too big to fit in the memory this parameter can be used to read
CSV file in chunks. For example:
>>> import vaex
>>> for i, df in enumerate(vaex.from_csv('taxi.csv', chunk_size=100_000)):
>>> df = df[df.passenger_count < 6]
>>> df.export_hdf5(f'taxi_{i:02}.hdf5')
:param bool or str convert: convert files to an hdf5 file for optimization, can also be a path. The CSV
file will be read in chunks: either using the provided chunk_size argument, or a default size. Each chunk will
be saved as a separate hdf5 file, then all of them will be combined into one hdf5 file. So for a big CSV file
you will need at least double of extra space on the disk. Default chunk_size for converting is 5 million rows,
which corresponds to around 1Gb memory on an example of NYC Taxi dataset.
:param kwargs: extra keyword arguments, currently passed to Pandas read_csv function, but the implementation might
change in future versions.
:returns: DataFrame
"""
if not convert:
return _from_csv_read(filename_or_buffer=filename_or_buffer, copy_index=copy_index,
chunk_size=chunk_size, **kwargs)
else:
if chunk_size is None:
# make it memory efficient by default
chunk_size = 5_000_000
return _from_csv_convert_and_read(filename_or_buffer=filename_or_buffer, copy_index=copy_index,
maybe_convert_path=convert, chunk_size=chunk_size, **kwargs)
def _from_csv_read(filename_or_buffer, copy_index, chunk_size, **kwargs):
import pandas as pd
if not chunk_size:
full_df = pd.read_csv(filename_or_buffer, **kwargs)
return from_pandas(full_df, copy_index=copy_index)
else:
def iterator():
chunk_iterator = pd.read_csv(filename_or_buffer, chunksize=chunk_size, **kwargs)
for chunk_df in chunk_iterator:
yield from_pandas(chunk_df, copy_index=copy_index)
return iterator()
def _from_csv_convert_and_read(filename_or_buffer, copy_index, maybe_convert_path, chunk_size, **kwargs):
# figure out the CSV file path
if isinstance(filename_or_buffer, str):
csv_path = filename_or_buffer
elif isinstance(maybe_convert_path, str):
csv_path = re.sub(r'\.hdf5$', '', str(maybe_convert_path), flags=re.IGNORECASE)
else:
raise ValueError('Cannot derive filename to use for converted HDF5 file, '
'please specify it using convert="my.csv.hdf5"')
# reuse a previously converted HDF5 file
import vaex.file
combined_hdf5 = _convert_name(csv_path)
if os.path.exists(combined_hdf5):
return vaex.file.open(combined_hdf5)
# convert CSV chunks to separate HDF5 files
import pandas as pd
converted_paths = []
csv_reader = pd.read_csv(filename_or_buffer, chunksize=chunk_size, **kwargs)
for i, df_pandas in enumerate(csv_reader):
df = from_pandas(df_pandas, copy_index=copy_index)
filename_hdf5 = _convert_name(csv_path, suffix='_chunk%d' % i)
df.export_hdf5(filename_hdf5, shuffle=False)
converted_paths.append(filename_hdf5)
logger.info('saved chunk #%d to %s' % (i, filename_hdf5))
# combine chunks into one HDF5 file
if len(converted_paths) == 1:
# no need to merge several HDF5 files
os.rename(converted_paths[0], combined_hdf5)
else:
logger.info('converting %d chunks into single HDF5 file %s' % (len(converted_paths), combined_hdf5))
dfs = [vaex.file.open(p) for p in converted_paths]
df_combined = vaex.concat(dfs)
df_combined.export_hdf5(combined_hdf5, shuffle=False)
logger.info('deleting %d chunk files' % len(converted_paths))
for df, df_path in zip(dfs, converted_paths):
try:
df.close()
os.remove(df_path)
except Exception as e:
logger.error('Could not close or delete intermediate hdf5 file %s used to convert %s to hdf5: %s' % (
df_path, csv_path, e))
return vaex.file.open(combined_hdf5)
def read_csv(filepath_or_buffer, **kwargs):
'''Alias to from_csv.'''
return from_csv(filepath_or_buffer, **kwargs)
def read_csv_and_convert(path, shuffle=False, copy_index=False, **kwargs):
'''Convert a path (or glob pattern) to a single hdf5 file, will open the hdf5 file if exists.
Example:
>>> vaex.read_csv_and_convert('test-*.csv', shuffle=True) # this may take a while
>>> vaex.read_csv_and_convert('test-*.csv', shuffle=True) # 2nd time it is instant
:param str path: path of file or glob pattern for multiple files
:param bool shuffle: shuffle DataFrame when converting to hdf5
:param bool copy_index: by default pandas will create an index (row number), set to true if you want to include this as a column.
:param kwargs: parameters passed to pandas' read_csv
'''
from concurrent.futures import ProcessPoolExecutor
import pandas as pd
filenames = glob.glob(path)
if len(filenames) > 1:
filename_hdf5 = _convert_name(filenames, shuffle=shuffle)
filename_hdf5_noshuffle = _convert_name(filenames, shuffle=False)
if not os.path.exists(filename_hdf5):
if not os.path.exists(filename_hdf5_noshuffle):
# with ProcessPoolExecutor() as executor:
# executor.submit(read_csv_and_convert, filenames, shuffle=shuffle, **kwargs)
for filename in filenames:
read_csv_and_convert(filename, shuffle=shuffle, copy_index=copy_index, **kwargs)
ds = open_many([_convert_name(k, shuffle=shuffle) for k in filenames])
else:
ds = open(filename_hdf5_noshuffle)
ds.export_hdf5(filename_hdf5, shuffle=shuffle)
return open(filename_hdf5)
else:
filename = filenames[0]
filename_hdf5 = _convert_name(filename, shuffle=shuffle)
filename_hdf5_noshuffle = _convert_name(filename, shuffle=False)
if not os.path.exists(filename_hdf5):
if not os.path.exists(filename_hdf5_noshuffle):
df = pd.read_csv(filename, **kwargs)
ds = from_pandas(df, copy_index=copy_index)
else:
ds = open(filename_hdf5_noshuffle)
ds.export_hdf5(filename_hdf5, shuffle=shuffle)
return open(filename_hdf5)
aliases = vaex.settings.main.auto_store_dict("aliases")
# py2/p3 compatibility
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
def connect(url, **kwargs):
"""Connect to hostname supporting the vaex web api.
:param str hostname: hostname or ip address of server
:rtype: vaex.server.client.Client
"""
# dispatch to vaex.server package
from vaex.server import connect
return connect(url, **kwargs)
def example():
"""Returns an example DataFrame which comes with vaex for testing/learning purposes.
:rtype: DataFrame
"""
return vaex.datasets.helmi_de_zeeuw_10percent.fetch()
def zeldovich(dim=2, N=256, n=-2.5, t=None, scale=1, seed=None):
"""Creates a zeldovich DataFrame.
"""
import vaex.file
return vaex.file.other.Zeldovich(dim=dim, N=N, n=n, t=t, scale=scale)
def set_log_level_debug():
"""set log level to debug"""
import logging
logging.getLogger("vaex").setLevel(logging.DEBUG)
def set_log_level_info():
"""set log level to info"""
import logging
logging.getLogger("vaex").setLevel(logging.INFO)
def set_log_level_warning():
"""set log level to warning"""
import logging
logging.getLogger("vaex").setLevel(logging.WARNING)
def set_log_level_exception():
"""set log level to exception"""
import logging
logging.getLogger("vaex").setLevel(logging.FATAL)
def set_log_level_off():
"""Disabled logging"""
import logging
logging.disable(logging.CRITICAL)
format = "%(levelname)s:%(threadName)s:%(name)s:%(message)s"
logging.basicConfig(level=logging.INFO, format=format)
DEBUG_MODE = bool(os.environ.get('VAEX_DEBUG', ''))
if DEBUG_MODE:
logging.basicConfig(level=logging.DEBUG)
set_log_level_debug()
else:
# logging.basicConfig(level=logging.DEBUG)
set_log_level_warning()
import_script = os.path.expanduser("~/.vaex/vaex_import.py")
if os.path.exists(import_script):
try:
with open(import_script) as f:
code = compile(f.read(), import_script, 'exec')
exec(code)
except:
import traceback
traceback.print_stack()
logger = logging.getLogger('vaex')
def register_dataframe_accessor(name, cls=None, override=False):
"""Registers a new accessor for a dataframe
See vaex.geo for an example.
"""
def wrapper(cls):
old_value = getattr(vaex.dataframe.DataFrame, name, None)
if old_value is not None and override is False:
raise ValueError("DataFrame already has a property/accessor named %r (%r)" % (name, old_value) )
def get_accessor(self):
if name in self.__dict__:
return self.__dict__[name]
else:
self.__dict__[name] = cls(self)
return self.__dict__[name]
setattr(vaex.dataframe.DataFrame, name, property(get_accessor))
return cls
if cls is None:
return wrapper
else:
return wrapper(cls)
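# Usage sketch for register_dataframe_accessor (editor's addition; the accessor name
# 'stats' and the class below are made up for illustration):
#
#   >>> import vaex
#   >>> @vaex.register_dataframe_accessor('stats', override=True)
#   ... class StatsAccessor:
#   ...     def __init__(self, df):
#   ...         self.df = df
#   ...     def row_count(self):
#   ...         return len(self.df)
#   >>> df = vaex.example()
#   >>> df.stats.row_count() == len(df)
#   True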
for entry in pkg_resources.iter_entry_points(group='vaex.namespace'):
logger.warning('(DEPRECATED, use vaex.dataframe.accessor) adding vaex namespace: ' + entry.name)
try:
add_namespace = entry.load()
add_namespace()
except Exception:
logger.exception('issue loading ' + entry.name)
_df_lazy_accessors = {}
class _lazy_accessor(object):
def __init__(self, name, scope, loader):
"""When adding an accessor geo.cone, scope=='geo', name='cone', scope may be falsy"""
self.loader = loader
self.name = name
self.scope = scope
def __call__(self, obj):
if self.name in obj.__dict__:
return obj.__dict__[self.name]
else:
cls = self.loader()
accessor = cls(obj)
obj.__dict__[self.name] = accessor
fullname = self.name
if self.scope:
fullname = self.scope + '.' + self.name
if fullname in _df_lazy_accessors:
for name, scope, loader in _df_lazy_accessors[fullname]:
assert fullname == scope
setattr(cls, name, property(_lazy_accessor(name, scope, loader)))
return obj.__dict__[self.name]
def _add_lazy_accessor(name, loader, target_class=vaex.dataframe.DataFrame):
"""Internal use see tests/internal/accessor_test.py for usage
This enables us to have df.foo.bar accessors that lazily loads the modules.
"""
parts = name.split('.')
target_class = vaex.dataframe.DataFrame
if len(parts) == 1:
setattr(target_class, parts[0], property(_lazy_accessor(name, None, loader)))
else:
scope = ".".join(parts[:-1])
if scope not in _df_lazy_accessors:
_df_lazy_accessors[scope] = []
_df_lazy_accessors[scope].append((parts[-1], scope, loader))
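# Illustrative sketch of the lazy accessor mechanism (editor's addition; the
# accessor name 'geo' and the stand-in class are hypothetical):
#
#   >>> class GeoAccessor:           # stand-in accessor class
#   ...     def __init__(self, df):
#   ...         self.df = df
#   >>> _add_lazy_accessor('geo', lambda: GeoAccessor)
#   >>> df = vaex.example()
#   >>> df.geo                       # loader runs here; the instance is cached on df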
for entry in pkg_resources.iter_entry_points(group='vaex.dataframe.accessor'):
logger.debug('adding vaex accessor: ' + entry.name)
def loader(entry=entry):
return entry.load()
_add_lazy_accessor(entry.name, loader)
for entry in pkg_resources.iter_entry_points(group='vaex.plugin'):
if entry.module_name == 'vaex_arrow.opener':
# if vaex_arrow package is installed, we ignore it
continue
logger.debug('adding vaex plugin: ' + entry.name)
try:
add_namespace = entry.load()
add_namespace()
except Exception:
logger.exception('issue loading ' + entry.name)
def concat(dfs):
'''Concatenate a list of DataFrames.
:rtype: DataFrame
'''
df, *tail = dfs
return df.concat(*tail)
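# Quick usage sketch for concat (editor's addition):
#
#   >>> import numpy as np, vaex
#   >>> df1 = vaex.from_arrays(x=np.arange(3))
#   >>> df2 = vaex.from_arrays(x=np.arange(3, 6))
#   >>> len(vaex.concat([df1, df2]))
#   6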
def vrange(start, stop, step=1, dtype='f8'):
"""Creates a virtual column which is the equivalent of numpy.arange, but uses 0 memory"""
from .column import ColumnVirtualRange
return ColumnVirtualRange(start, stop, step, dtype)
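# Usage sketch for vrange (editor's addition; 'row_id' is an arbitrary column name):
#
#   >>> df = vaex.example()
#   >>> df['row_id'] = vaex.vrange(0, len(df))   # virtual 0..N-1 column, no memory allocated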
def string_column(strings):
import pyarrow as pa
return pa.array(strings)
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The WebDriver implementation."""
import copy
from importlib import import_module
import pkgutil
import sys
from typing import Dict, List, Optional, Union
import warnings
from abc import ABCMeta
from base64 import b64decode
from contextlib import asynccontextmanager, contextmanager
from .bidi_connection import BidiConnection
from .command import Command
from .errorhandler import ErrorHandler
from .file_detector import FileDetector, LocalFileDetector
from .mobile import Mobile
from .remote_connection import RemoteConnection
from .script_key import ScriptKey
from .shadowroot import ShadowRoot
from .switch_to import SwitchTo
from .webelement import WebElement
from selenium.common.exceptions import (InvalidArgumentException,
JavascriptException,
WebDriverException,
NoSuchCookieException,
NoSuchElementException)
from selenium.webdriver.common.by import By
from selenium.webdriver.common.options import BaseOptions
from selenium.webdriver.common.print_page_options import PrintOptions
from selenium.webdriver.common.timeouts import Timeouts
from selenium.webdriver.common.html5.application_cache import ApplicationCache
from selenium.webdriver.support.relative_locator import RelativeBy
_W3C_CAPABILITY_NAMES = frozenset([
'acceptInsecureCerts',
'browserName',
'browserVersion',
'pageLoadStrategy',
'platformName',
'proxy',
'setWindowRect',
'strictFileInteractability',
'timeouts',
'unhandledPromptBehavior',
'webSocketUrl'
])
_OSS_W3C_CONVERSION = {
'acceptSslCerts': 'acceptInsecureCerts',
'version': 'browserVersion',
'platform': 'platformName'
}
cdp = None
def import_cdp():
global cdp
if not cdp:
cdp = import_module("selenium.webdriver.common.bidi.cdp")
def _make_w3c_caps(caps):
"""Makes a W3C alwaysMatch capabilities object.
Filters out capability names that are not in the W3C spec. Spec-compliant
drivers will reject requests containing unknown capability names.
Moves the Firefox profile, if present, from the old location to the new Firefox
options object.
:Args:
- caps - A dictionary of capabilities requested by the caller.
"""
caps = copy.deepcopy(caps)
profile = caps.get('firefox_profile')
always_match = {}
if caps.get('proxy') and caps['proxy'].get('proxyType'):
caps['proxy']['proxyType'] = caps['proxy']['proxyType'].lower()
for k, v in caps.items():
if v and k in _OSS_W3C_CONVERSION:
always_match[_OSS_W3C_CONVERSION[k]] = v.lower() if k == 'platform' else v
if k in _W3C_CAPABILITY_NAMES or ':' in k:
always_match[k] = v
if profile:
moz_opts = always_match.get('moz:firefoxOptions', {})
# If it's already present, assume the caller did that intentionally.
if 'profile' not in moz_opts:
# Don't mutate the original capabilities.
new_opts = copy.deepcopy(moz_opts)
new_opts['profile'] = profile
always_match['moz:firefoxOptions'] = new_opts
return {"firstMatch": [{}], "alwaysMatch": always_match}
def get_remote_connection(capabilities, command_executor, keep_alive, ignore_local_proxy=False):
from selenium.webdriver.chromium.remote_connection import ChromiumRemoteConnection
from selenium.webdriver.safari.remote_connection import SafariRemoteConnection
from selenium.webdriver.firefox.remote_connection import FirefoxRemoteConnection
candidates = [RemoteConnection] + [ChromiumRemoteConnection, SafariRemoteConnection, FirefoxRemoteConnection]
handler = next(
(c for c in candidates if c.browser_name == capabilities.get('browserName')),
RemoteConnection
)
return handler(command_executor, keep_alive=keep_alive, ignore_proxy=ignore_local_proxy)
def create_matches(options: List[BaseOptions]) -> Dict:
capabilities = {"capabilities": {}}
opts = []
for opt in options:
opts.append(opt.to_capabilities())
opts_size = len(opts)
samesies = {}
# Can not use bitwise operations on the dicts or lists due to
# https://bugs.python.org/issue38210
for i in range(opts_size):
min_index = i
if i + 1 < opts_size:
first_keys = opts[min_index].keys()
for kys in first_keys:
if kys in opts[i + 1].keys():
if opts[min_index][kys] == opts[i + 1][kys]:
samesies.update({kys: opts[min_index][kys]})
always = {}
for k, v in samesies.items():
always[k] = v
for i in opts:
for k in always.keys():
del i[k]
capabilities["capabilities"]["alwaysMatch"] = always
capabilities["capabilities"]["firstMatch"] = opts
return capabilities
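# Illustrative sketch (hypothetical capability dicts): values that the options
# report identically are hoisted into alwaysMatch, while the browser-specific
# remainder stays in firstMatch.
#
#     create_matches([chrome_options, firefox_options])
#     # -> {'capabilities': {'alwaysMatch': {'acceptInsecureCerts': True},
#     #                      'firstMatch': [{'browserName': 'chrome', ...},
#     #                                     {'browserName': 'firefox', ...}]}}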
class BaseWebDriver(metaclass=ABCMeta):
"""
Abstract Base Class for all WebDriver subtypes.
ABCs allow custom implementations of WebDriver to be registered so that isinstance
type checks will succeed.
"""
class WebDriver(BaseWebDriver):
"""
Controls a browser by sending commands to a remote server.
This server is expected to be running the WebDriver wire protocol
as defined at
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol
:Attributes:
- session_id - String ID of the browser session started and controlled by this WebDriver.
- capabilities - Dictionary of effective capabilities of this browser session as returned
by the remote server. See https://github.com/SeleniumHQ/selenium/wiki/DesiredCapabilities
- command_executor - remote_connection.RemoteConnection object used to execute commands.
- error_handler - errorhandler.ErrorHandler object used to handle errors.
"""
_web_element_cls = WebElement
_shadowroot_cls = ShadowRoot
def __init__(self, command_executor='http://127.0.0.1:4444',
desired_capabilities=None, browser_profile=None, proxy=None,
keep_alive=True, file_detector=None, options: Union[BaseOptions, List[BaseOptions]] = None):
"""
Create a new driver that will issue commands using the wire protocol.
:Args:
- command_executor - Either a string representing URL of the remote server or a custom
remote_connection.RemoteConnection object. Defaults to 'http://127.0.0.1:4444'.
- desired_capabilities - A dictionary of capabilities to request when
starting the browser session. Deprecated; prefer passing an Options instance via the options kwarg.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object.
Only used if Firefox is requested. Optional.
- proxy - A selenium.webdriver.common.proxy.Proxy object. The browser session will
be started with given proxy settings, if possible. Optional.
- keep_alive - Whether to configure remote_connection.RemoteConnection to use
HTTP keep-alive. Defaults to True.
- file_detector - Pass custom file detector object during instantiation. If None,
then default LocalFileDetector() will be used.
- options - instance of a driver options.Options class
"""
if desired_capabilities:
warnings.warn(
"desired_capabilities has been deprecated, please pass in an Options object with options kwarg",
DeprecationWarning,
stacklevel=2
)
if browser_profile:
warnings.warn(
"browser_profile has been deprecated, please pass in an Firefox Options object with options kwarg",
DeprecationWarning,
stacklevel=2
)
if proxy:
warnings.warn(
"proxy has been deprecated, please pass in an Options object with options kwarg",
DeprecationWarning,
stacklevel=2
)
if not keep_alive:
warnings.warn(
"keep_alive has been deprecated. We will be using True as the default value as we start removing it.",
DeprecationWarning,
stacklevel=2
)
capabilities = {}
# Defined up front so the flag exists even when a list of options is passed
# and command_executor is a string (avoids an UnboundLocalError below).
_ignore_local_proxy = False
# If we get a list we can assume that no capabilities
# have been passed in
if isinstance(options, list):
capabilities = create_matches(options)
else:
_ignore_local_proxy = False
if options:
capabilities = options.to_capabilities()
_ignore_local_proxy = options._ignore_local_proxy
if desired_capabilities:
if not isinstance(desired_capabilities, dict):
raise WebDriverException("Desired Capabilities must be a dictionary")
else:
capabilities.update(desired_capabilities)
self.command_executor = command_executor
if isinstance(self.command_executor, (str, bytes)):
self.command_executor = get_remote_connection(capabilities, command_executor=command_executor,
keep_alive=keep_alive,
ignore_local_proxy=_ignore_local_proxy)
self._is_remote = True
self.session_id = None
self.caps = {}
self.pinned_scripts = {}
self.error_handler = ErrorHandler()
self._switch_to = SwitchTo(self)
self._mobile = Mobile(self)
self.file_detector = file_detector or LocalFileDetector()
self.start_client()
self.start_session(capabilities, browser_profile)
def __repr__(self):
return '<{0.__module__}.{0.__name__} (session="{1}")>'.format(
type(self), self.session_id)
def __enter__(self):
return self
def __exit__(self, *args):
self.quit()
@contextmanager
def file_detector_context(self, file_detector_class, *args, **kwargs):
"""
Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args.
"""
last_detector = None
if not isinstance(self.file_detector, file_detector_class):
last_detector = self.file_detector
self.file_detector = file_detector_class(*args, **kwargs)
try:
yield
finally:
if last_detector:
self.file_detector = last_detector
@property
def mobile(self):
return self._mobile
@property
def name(self) -> str:
"""Returns the name of the underlying browser for this instance.
:Usage:
::
name = driver.name
"""
if 'browserName' in self.caps:
return self.caps['browserName']
else:
raise KeyError('browserName not specified in session capabilities')
def start_client(self):
"""
Called before starting a new session. This method may be overridden
to define custom startup behavior.
"""
pass
def stop_client(self):
"""
Called after executing a quit command. This method may be overridden
to define custom shutdown behavior.
"""
pass
def start_session(self, capabilities: dict, browser_profile=None) -> None:
"""
Creates a new session with the desired capabilities.
:Args:
- capabilities - a capabilities dict to start the session with.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
"""
if not isinstance(capabilities, dict):
raise InvalidArgumentException("Capabilities must be a dictionary")
if browser_profile:
if "moz:firefoxOptions" in capabilities:
capabilities["moz:firefoxOptions"]["profile"] = browser_profile.encoded
else:
capabilities.update({'firefox_profile': browser_profile.encoded})
w3c_caps = _make_w3c_caps(capabilities)
parameters = {"capabilities": w3c_caps,
"desiredCapabilities": capabilities}
response = self.execute(Command.NEW_SESSION, parameters)
if 'sessionId' not in response:
response = response['value']
self.session_id = response['sessionId']
self.caps = response.get('value')
# if capabilities is none we are probably speaking to
# a W3C endpoint
if not self.caps:
self.caps = response.get('capabilities')
def _wrap_value(self, value):
if isinstance(value, dict):
converted = {}
for key, val in value.items():
converted[key] = self._wrap_value(val)
return converted
elif isinstance(value, self._web_element_cls):
return {'element-6066-11e4-a52e-4f735466cecf': value.id}
elif isinstance(value, self._shadowroot_cls):
return {'shadow-6066-11e4-a52e-4f735466cecf': value.id}
elif isinstance(value, list):
return list(self._wrap_value(item) for item in value)
else:
return value
def create_web_element(self, element_id: str) -> WebElement:
"""Creates a web element with the specified `element_id`."""
return self._web_element_cls(self, element_id)
def _unwrap_value(self, value):
if isinstance(value, dict):
if 'element-6066-11e4-a52e-4f735466cecf' in value:
return self.create_web_element(value['element-6066-11e4-a52e-4f735466cecf'])
elif 'shadow-6066-11e4-a52e-4f735466cecf' in value:
return self._shadowroot_cls(self, value['shadow-6066-11e4-a52e-4f735466cecf'])
else:
for key, val in value.items():
value[key] = self._unwrap_value(val)
return value
elif isinstance(value, list):
return list(self._unwrap_value(item) for item in value)
else:
return value
def execute(self, driver_command: str, params: dict = None) -> dict:
"""
Sends a command to be executed by a command.CommandExecutor.
:Args:
- driver_command: The name of the command to execute as a string.
- params: A dictionary of named parameters to send with the command.
:Returns:
The command's JSON response loaded into a dictionary object.
"""
if self.session_id:
if not params:
params = {'sessionId': self.session_id}
elif 'sessionId' not in params:
params['sessionId'] = self.session_id
params = self._wrap_value(params)
response = self.command_executor.execute(driver_command, params)
if response:
self.error_handler.check_response(response)
response['value'] = self._unwrap_value(
response.get('value', None))
return response
# If the server doesn't send a response, assume the command was
# a success
return {'success': 0, 'value': None, 'sessionId': self.session_id}
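# Illustrative note: the public methods below are thin wrappers around
# execute(); driver.get(url), for instance, is roughly equivalent to
#
#     driver.execute(Command.GET, {'url': 'https://example.com'})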
def get(self, url: str) -> None:
"""
Loads a web page in the current browser session.
"""
self.execute(Command.GET, {'url': url})
@property
def title(self) -> str:
"""Returns the title of the current page.
:Usage:
::
title = driver.title
"""
resp = self.execute(Command.GET_TITLE)
return resp['value'] if resp['value'] else ""
def find_element_by_id(self, id_) -> WebElement:
"""Finds an element by id.
:Args:
- id\\_ - The id of the element to be found.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = driver.find_element_by_id('foo')
"""
warnings.warn(
"find_element_by_* commands are deprecated. Please use find_element() instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_) -> List[WebElement]:
"""
Finds multiple elements by id.
:Args:
- id\\_ - The id of the elements to be found.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_id('foo')
"""
warnings.warn(
"find_elements_by_id is deprecated. Please use find_elements(by=By.ID, value=id_) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath) -> WebElement:
"""
Finds an element by xpath.
:Args:
- xpath - The xpath locator of the element to find.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = driver.find_element_by_xpath('//div/td[1]')
"""
warnings.warn(
"find_element_by_xpath is deprecated. Please use find_element(by=By.XPATH, value=xpath) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath) -> List[WebElement]:
"""
Finds multiple elements by xpath.
:Args:
- xpath - The xpath locator of the elements to be found.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
warnings.warn(
"find_elements_by_xpath is deprecated. Please use find_elements(by=By.XPATH, value=xpath) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_link_text(self, link_text) -> WebElement:
"""
Finds an element by link text.
:Args:
- link_text: The text of the element to be found.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = driver.find_element_by_link_text('Sign In')
"""
warnings.warn(
"find_element_by_link_text is deprecated. Please use find_element(by=By.LINK_TEXT, value=link_text) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, text) -> List[WebElement]:
"""
Finds elements by link text.
:Args:
- link_text: The text of the elements to be found.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
::
elements = driver.find_elements_by_link_text('Sign In')
"""
warnings.warn(
"find_elements_by_link_text is deprecated. Please use find_elements(by=By.LINK_TEXT, value=text) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_elements(by=By.LINK_TEXT, value=text)
def find_element_by_partial_link_text(self, link_text) -> WebElement:
"""
Finds an element by a partial match of its link text.
:Args:
- link_text: The text of the element to partially match on.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = driver.find_element_by_partial_link_text('Sign')
"""
warnings.warn(
"find_element_by_partial_link_text is deprecated. Please use find_element(by=By.PARTIAL_LINK_TEXT, value=link_text) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text) -> List[WebElement]:
"""
Finds elements by a partial match of their link text.
:Args:
- link_text: The text of the element to partial match on.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
::
elements = driver.find_elements_by_partial_link_text('Sign')
"""
warnings.warn(
"find_elements_by_partial_link_text is deprecated. Please use find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_name(self, name) -> WebElement:
"""
Finds an element by name.
:Args:
- name: The name of the element to find.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = driver.find_element_by_name('foo')
"""
warnings.warn(
"find_element_by_name is deprecated. Please use find_element(by=By.NAME, value=name) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name) -> List[WebElement]:
"""
Finds elements by name.
:Args:
- name: The name of the elements to find.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
::
elements = driver.find_elements_by_name('foo')
"""
warnings.warn(
"find_elements_by_name is deprecated. Please use find_elements(by=By.NAME, value=name)=By.NAME, value=name) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_elements(by=By.NAME, value=name)
def find_element_by_tag_name(self, name) -> WebElement:
"""
Finds an element by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = driver.find_element_by_tag_name('h1')
"""
warnings.warn(
"find_element_by_tag_name is deprecated. Please use find_element(by=By.TAG_NAME, value=name) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name) -> List[WebElement]:
"""
Finds elements by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_tag_name('h1')
"""
warnings.warn(
"find_elements_by_tag_name is deprecated. Please use find_elements(by=By.TAG_NAME, value=name) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_class_name(self, name) -> WebElement:
"""
Finds an element by class name.
:Args:
- name: The class name of the element to find.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = driver.find_element_by_class_name('foo')
"""
warnings.warn(
"find_element_by_class_name is deprecated. Please use find_element(by=By.CLASS_NAME, value=name) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name) -> List[WebElement]:
"""
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_class_name('foo')
"""
warnings.warn(
"find_elements_by_class_name is deprecated. Please use find_elements(by=By.CLASS_NAME, value=name) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector) -> WebElement:
"""
Finds an element by css selector.
:Args:
- css_selector - CSS selector string, ex: 'a.nav#home'
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = driver.find_element_by_css_selector('#foo')
"""
warnings.warn(
"find_element_by_css_selector is deprecated. Please use find_element(by=By.CSS_SELECTOR, value=css_selector) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector) -> List[WebElement]:
"""
Finds elements by css selector.
:Args:
- css_selector - CSS selector string, ex: 'a.nav#home'
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_css_selector('.foo')
"""
warnings.warn(
"find_elements_by_css_selector is deprecated. Please use find_elements(by=By.CSS_SELECTOR, value=css_selector) instead",
DeprecationWarning,
stacklevel=2,
)
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def pin_script(self, script, script_key=None) -> ScriptKey:
"""
"""
if not script_key:
_script_key = ScriptKey()
else:
_script_key = ScriptKey(script_key)
self.pinned_scripts[_script_key.id] = script
return _script_key
def unpin(self, script_key) -> None:
"""
"""
self.pinned_scripts.pop(script_key.id)
def get_pinned_scripts(self) -> List[str]:
"""
"""
return list(self.pinned_scripts.keys())
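# Illustrative usage of script pinning together with execute_script() below;
# the pinned source is resolved from its ScriptKey before being sent.
#
#     key = driver.pin_script('return document.title;')
#     title = driver.execute_script(key)
#     driver.unpin(key)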
def execute_script(self, script, *args):
"""
Synchronously Executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \\*args: Any applicable arguments for your JavaScript.
:Usage:
::
driver.execute_script('return document.title;')
"""
if isinstance(script, ScriptKey):
try:
script = self.pinned_scripts[script.id]
except KeyError:
raise JavascriptException("Pinned script could not be found")
converted_args = list(args)
command = Command.W3C_EXECUTE_SCRIPT
return self.execute(command, {
'script': script,
'args': converted_args})['value']
def execute_async_script(self, script: str, *args):
"""
Asynchronously Executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \\*args: Any applicable arguments for your JavaScript.
:Usage:
::
script = "var callback = arguments[arguments.length - 1]; " \\
"window.setTimeout(function(){ callback('timeout') }, 3000);"
driver.execute_async_script(script)
"""
converted_args = list(args)
command = Command.W3C_EXECUTE_SCRIPT_ASYNC
return self.execute(command, {
'script': script,
'args': converted_args})['value']
@property
def current_url(self) -> str:
"""
Gets the URL of the current page.
:Usage:
::
driver.current_url
"""
return self.execute(Command.GET_CURRENT_URL)['value']
@property
def page_source(self) -> str:
"""
Gets the source of the current page.
:Usage:
::
driver.page_source
"""
return self.execute(Command.GET_PAGE_SOURCE)['value']
def close(self) -> None:
"""
Closes the current window.
:Usage:
::
driver.close()
"""
self.execute(Command.CLOSE)
def quit(self) -> None:
"""
Quits the driver and closes every associated window.
:Usage:
::
driver.quit()
"""
try:
self.execute(Command.QUIT)
finally:
self.stop_client()
self.command_executor.close()
@property
def current_window_handle(self) -> str:
"""
Returns the handle of the current window.
:Usage:
::
driver.current_window_handle
"""
return self.execute(Command.W3C_GET_CURRENT_WINDOW_HANDLE)['value']
@property
def window_handles(self) -> List[str]:
"""
Returns the handles of all windows within the current session.
:Usage:
::
driver.window_handles
"""
return self.execute(Command.W3C_GET_WINDOW_HANDLES)['value']
def maximize_window(self) -> None:
"""
Maximizes the current window that webdriver is using
"""
params = None
command = Command.W3C_MAXIMIZE_WINDOW
self.execute(command, params)
def fullscreen_window(self) -> None:
"""
Invokes the window manager-specific 'full screen' operation
"""
self.execute(Command.FULLSCREEN_WINDOW)
def minimize_window(self) -> None:
"""
Invokes the window manager-specific 'minimize' operation
"""
self.execute(Command.MINIMIZE_WINDOW)
def print_page(self, print_options: Optional[PrintOptions] = None) -> str:
"""
Takes a PDF snapshot of the current page.
The driver makes a best effort to return a PDF based on the provided parameters.
"""
options = {}
if print_options:
options = print_options.to_dict()
return self.execute(Command.PRINT_PAGE, options)['value']
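# Illustrative usage (the filename is hypothetical): the returned value is a
# base64-encoded PDF, so it can be decoded and written to disk.
#
#     from base64 import b64decode
#     from selenium.webdriver.common.print_page_options import PrintOptions
#     pdf = b64decode(driver.print_page(PrintOptions()))
#     with open('page.pdf', 'wb') as f:
#         f.write(pdf)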
@property
def switch_to(self) -> SwitchTo:
"""
:Returns:
- SwitchTo: an object containing all options to switch focus into
:Usage:
::
element = driver.switch_to.active_element
alert = driver.switch_to.alert
driver.switch_to.default_content()
driver.switch_to.frame('frame_name')
driver.switch_to.frame(1)
driver.switch_to.frame(driver.find_elements_by_tag_name("iframe")[0])
driver.switch_to.parent_frame()
driver.switch_to.window('main')
"""
return self._switch_to
# Navigation
def back(self) -> None:
"""
Goes one step backward in the browser history.
:Usage:
::
driver.back()
"""
self.execute(Command.GO_BACK)
def forward(self) -> None:
"""
Goes one step forward in the browser history.
:Usage:
::
driver.forward()
"""
self.execute(Command.GO_FORWARD)
def refresh(self) -> None:
"""
Refreshes the current page.
:Usage:
::
driver.refresh()
"""
self.execute(Command.REFRESH)
# Options
def get_cookies(self) -> List[dict]:
"""
Returns a set of dictionaries, corresponding to cookies visible in the current session.
:Usage:
::
driver.get_cookies()
"""
return self.execute(Command.GET_ALL_COOKIES)['value']
def get_cookie(self, name) -> dict:
"""
Get a single cookie by name. Returns the cookie if found, None if not.
:Usage:
::
driver.get_cookie('my_cookie')
"""
try:
return self.execute(Command.GET_COOKIE, {'name': name})['value']
except NoSuchCookieException:
return None
def delete_cookie(self, name) -> None:
"""
Deletes a single cookie with the given name.
:Usage:
::
driver.delete_cookie('my_cookie')
"""
self.execute(Command.DELETE_COOKIE, {'name': name})
def delete_all_cookies(self) -> None:
"""
Delete all cookies in the scope of the session.
:Usage:
::
driver.delete_all_cookies()
"""
self.execute(Command.DELETE_ALL_COOKIES)
def add_cookie(self, cookie_dict) -> None:
"""
Adds a cookie to your current session.
:Args:
- cookie_dict: A dictionary object, with required keys - "name" and "value";
optional keys - "path", "domain", "secure", "httpOnly", "expiry", "sameSite"
Usage:
driver.add_cookie({'name' : 'foo', 'value' : 'bar'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/', 'secure':True})
driver.add_cookie({'name': 'foo', 'value': 'bar', 'sameSite': 'Strict'})
"""
if 'sameSite' in cookie_dict:
assert cookie_dict['sameSite'] in ['Strict', 'Lax', 'None']
self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict})
else:
self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict})
# Timeouts
def implicitly_wait(self, time_to_wait) -> None:
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
::
driver.implicitly_wait(30)
"""
self.execute(Command.SET_TIMEOUTS, {
'implicit': int(float(time_to_wait) * 1000)})
def set_script_timeout(self, time_to_wait) -> None:
"""
Set the amount of time that the script should wait during an
execute_async_script call before throwing an error.
:Args:
- time_to_wait: The amount of time to wait (in seconds)
:Usage:
::
driver.set_script_timeout(30)
"""
self.execute(Command.SET_TIMEOUTS, {
'script': int(float(time_to_wait) * 1000)})
def set_page_load_timeout(self, time_to_wait) -> None:
"""
Set the amount of time to wait for a page load to complete
before throwing an error.
:Args:
- time_to_wait: The amount of time to wait
:Usage:
::
driver.set_page_load_timeout(30)
"""
try:
self.execute(Command.SET_TIMEOUTS, {
'pageLoad': int(float(time_to_wait) * 1000)})
except WebDriverException:
self.execute(Command.SET_TIMEOUTS, {
'ms': float(time_to_wait) * 1000,
'type': 'page load'})
@property
def timeouts(self) -> Timeouts:
"""
Get all the timeouts that have been set on the current session
:Usage:
::
driver.timeouts
:rtype: Timeouts
"""
timeouts = self.execute(Command.GET_TIMEOUTS)['value']
timeouts["implicit_wait"] = timeouts.pop("implicit") / 1000
timeouts["page_load"] = timeouts.pop("pageLoad") / 1000
timeouts["script"] = timeouts.pop("script") / 1000
return Timeouts(**timeouts)
@timeouts.setter
def timeouts(self, timeouts) -> None:
"""
Set all timeouts for the session. This will override any previously
set timeouts.
:Usage:
::
my_timeouts = Timeouts()
my_timeouts.implicit_wait = 10
driver.timeouts = my_timeouts
"""
self.execute(Command.SET_TIMEOUTS, timeouts._to_json())['value']
def find_element(self, by=By.ID, value=None) -> WebElement:
"""
Find an element given a By strategy and locator.
:Usage:
::
element = driver.find_element(By.ID, 'foo')
:rtype: WebElement
"""
if isinstance(by, RelativeBy):
elements = self.find_elements(by=by, value=value)
if not elements:
raise NoSuchElementException(f"Cannot locate relative element with: {by.root}")
return elements[0]
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENT, {
'using': by,
'value': value})['value']
def find_elements(self, by=By.ID, value=None) -> List[WebElement]:
"""
Find elements given a By strategy and locator.
:Usage:
::
elements = driver.find_elements(By.CLASS_NAME, 'foo')
:rtype: list of WebElement
"""
if isinstance(by, RelativeBy):
_pkg = '.'.join(__name__.split('.')[:-1])
raw_function = pkgutil.get_data(_pkg, 'findElements.js').decode('utf8')
find_element_js = "return ({}).apply(null, arguments);".format(raw_function)
return self.execute_script(find_element_js, by.to_dict())
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
# Return empty list if driver returns null
# See https://github.com/SeleniumHQ/selenium/issues/4555
return self.execute(Command.FIND_ELEMENTS, {
'using': by,
'value': value})['value'] or []
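# Illustrative RelativeBy usage (assumes locate_with() from
# selenium.webdriver.support.relative_locator is available in this release):
#
#     from selenium.webdriver.support.relative_locator import locate_with
#     buttons = driver.find_elements(locate_with(By.TAG_NAME, 'button').below({By.ID: 'email'}))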
@property
def desired_capabilities(self) -> dict:
"""
Returns the driver's current desired capabilities in use.
"""
warnings.warn("desired_capabilities is deprecated. Please call capabilities.",
DeprecationWarning, stacklevel=2)
return self.caps
@property
def capabilities(self) -> dict:
"""
Returns the driver's current capabilities in use.
"""
return self.caps
def get_screenshot_as_file(self, filename) -> bool:
"""
Saves a screenshot of the current window to a PNG image file. Returns
False if there is any IOError, else returns True. Use full paths in
your filename.
:Args:
- filename: The full path you wish to save your screenshot to. This
should end with a `.png` extension.
:Usage:
::
driver.get_screenshot_as_file('/Screenshots/foo.png')
"""
if not filename.lower().endswith('.png'):
warnings.warn("name used for saved screenshot does not match file "
"type. It should end with a `.png` extension", UserWarning)
png = self.get_screenshot_as_png()
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
finally:
del png
return True
def save_screenshot(self, filename) -> bool:
"""
Saves a screenshot of the current window to a PNG image file. Returns
False if there is any IOError, else returns True. Use full paths in
your filename.
:Args:
- filename: The full path you wish to save your screenshot to. This
should end with a `.png` extension.
:Usage:
::
driver.save_screenshot('/Screenshots/foo.png')
"""
return self.get_screenshot_as_file(filename)
def get_screenshot_as_png(self) -> bytes:
"""
Gets the screenshot of the current window as a binary data.
:Usage:
::
driver.get_screenshot_as_png()
"""
return b64decode(self.get_screenshot_as_base64().encode('ascii'))
def get_screenshot_as_base64(self) -> str:
"""
Gets the screenshot of the current window as a base64 encoded string
which is useful in embedded images in HTML.
:Usage:
::
driver.get_screenshot_as_base64()
"""
return self.execute(Command.SCREENSHOT)['value']
def set_window_size(self, width, height, windowHandle='current') -> dict:
"""
Sets the width and height of the current window. (window.resizeTo)
:Args:
- width: the width in pixels to set the window to
- height: the height in pixels to set the window to
:Usage:
::
driver.set_window_size(800,600)
"""
if windowHandle != 'current':
warnings.warn("Only 'current' window is supported for W3C compatible browsers.")
self.set_window_rect(width=int(width), height=int(height))
def get_window_size(self, windowHandle='current') -> dict:
"""
Gets the width and height of the current window.
:Usage:
::
driver.get_window_size()
"""
if windowHandle != 'current':
warnings.warn("Only 'current' window is supported for W3C compatible browsers.")
size = self.get_window_rect()
if size.get('value', None):
size = size['value']
return {k: size[k] for k in ('width', 'height')}
def set_window_position(self, x, y, windowHandle='current') -> dict:
"""
Sets the x,y position of the current window. (window.moveTo)
:Args:
- x: the x-coordinate in pixels to set the window position
- y: the y-coordinate in pixels to set the window position
:Usage:
::
driver.set_window_position(0,0)
"""
if windowHandle != 'current':
warnings.warn("Only 'current' window is supported for W3C compatible browsers.")
return self.set_window_rect(x=int(x), y=int(y))
def get_window_position(self, windowHandle='current') -> dict:
"""
Gets the x,y position of the current window.
:Usage:
::
driver.get_window_position()
"""
if windowHandle != 'current':
warnings.warn("Only 'current' window is supported for W3C compatible browsers.")
position = self.get_window_rect()
return {k: position[k] for k in ('x', 'y')}
def get_window_rect(self) -> dict:
"""
Gets the x, y coordinates of the window as well as height and width of
the current window.
:Usage:
::
driver.get_window_rect()
"""
return self.execute(Command.GET_WINDOW_RECT)['value']
def set_window_rect(self, x=None, y=None, width=None, height=None) -> dict:
"""
Sets the x, y coordinates of the window as well as height and width of
the current window. This method is only supported for W3C compatible
browsers; other browsers should use `set_window_position` and
`set_window_size`.
:Usage:
::
driver.set_window_rect(x=10, y=10)
driver.set_window_rect(width=100, height=200)
driver.set_window_rect(x=10, y=10, width=100, height=200)
"""
if (x is None and y is None) and (not height and not width):
raise InvalidArgumentException("x and y or height and width need values")
return self.execute(Command.SET_WINDOW_RECT, {"x": x, "y": y,
"width": width,
"height": height})['value']
@property
def file_detector(self):
return self._file_detector
@file_detector.setter
def file_detector(self, detector):
"""
Set the file detector to be used when sending keyboard input.
By default, this is set to LocalFileDetector() (see __init__).
see FileDetector
see LocalFileDetector
see UselessFileDetector
:Args:
- detector: The detector to use. Must not be None.
"""
if not detector:
raise WebDriverException("You may not set a file detector that is null")
if not isinstance(detector, FileDetector):
raise WebDriverException("Detector has to be instance of FileDetector")
self._file_detector = detector
@property
def orientation(self):
"""
Gets the current orientation of the device
:Usage:
::
orientation = driver.orientation
"""
return self.execute(Command.GET_SCREEN_ORIENTATION)['value']
@orientation.setter
def orientation(self, value):
"""
Sets the current orientation of the device
:Args:
- value: orientation to set it to.
:Usage:
::
driver.orientation = 'landscape'
"""
allowed_values = ['LANDSCAPE', 'PORTRAIT']
if value.upper() in allowed_values:
self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value})
else:
raise WebDriverException("You can only set the orientation to 'LANDSCAPE' and 'PORTRAIT'")
@property
def application_cache(self):
""" Returns a ApplicationCache Object to interact with the browser app cache"""
return ApplicationCache(self)
@property
def log_types(self):
"""
Gets a list of the available log types. This only works with w3c compliant browsers.
:Usage:
::
driver.log_types
"""
return self.execute(Command.GET_AVAILABLE_LOG_TYPES)['value']
def get_log(self, log_type):
"""
Gets the log for a given log type
:Args:
- log_type: type of log that which will be returned
:Usage:
::
driver.get_log('browser')
driver.get_log('driver')
driver.get_log('client')
driver.get_log('server')
"""
return self.execute(Command.GET_LOG, {'type': log_type})['value']
@asynccontextmanager
async def bidi_connection(self):
assert sys.version_info >= (3, 7)
global cdp
import_cdp()
ws_url = None
if self.caps.get("se:cdp"):
ws_url = self.caps.get("se:cdp")
version = self.caps.get("se:cdpVersion").split(".")[0]
else:
version, ws_url = self._get_cdp_details()
if not ws_url:
raise WebDriverException("Unable to find url to connect to from capabilities")
cdp.import_devtools(version)
devtools = import_module("selenium.webdriver.common.devtools.v{}".format(version))
async with cdp.open_cdp(ws_url) as conn:
targets = await conn.execute(devtools.target.get_targets())
target_id = targets[0].target_id
async with conn.open_session(target_id) as session:
yield BidiConnection(session, cdp, devtools)
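# Illustrative usage (attribute names assumed from the BidiConnection
# constructor above; requires Python 3.7+ and a CDP-capable browser):
#
#     async with driver.bidi_connection() as connection:
#         session, devtools = connection.session, connection.devtools
#         await session.execute(devtools.page.navigate(url='https://example.com'))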
def _get_cdp_details(self):
import json
import urllib3
http = urllib3.PoolManager()
_firefox = False
if self.caps.get("browserName") == "chrome":
debugger_address = self.caps.get(f"{self.vendor_prefix}:{self.caps.get('browserName')}Options").get("debuggerAddress")
else:
_firefox = True
debugger_address = self.caps.get("moz:debuggerAddress")
res = http.request('GET', f"http://{debugger_address}/json/version")
data = json.loads(res.data)
browser_version = data.get("Browser")
websocket_url = data.get("webSocketDebuggerUrl")
import re
if _firefox:
# Mozilla Automation Team asked to only support 85
# until WebDriver Bidi is available.
version = 85
else:
version = re.search(r".*/(\d+)\.", browser_version).group(1)
return version, websocket_url
|
|
import os
from secret_settings import *
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
# PACKAGE_ROOT = os.path.abspath(os.path.join(PROJECT_ROOT, os.pardir))
PACKAGE_ROOT = PROJECT_ROOT
BASE_DIR = PROJECT_ROOT
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = False
#DEBUG = True
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "Asia/Shanghai"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
# LANGUAGE_CODE = "en-us"
# LANGUAGE_CODE = "en-hans"
LANGUAGE_CODE = "zh-CN"
SITE_ID = int(os.environ.get("SITE_ID", 1))
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory static files should be collected to.
# Don"t put anything in this directory yourself; store your static files
# in apps" "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, "site_media")
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/site_media/static/"
# Additional locations of static files
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, "site_media", "static"),
]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_ROOT, "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"debug": DEBUG,
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
"account.context_processors.account",
"pinax_theme_bootstrap.context_processors.theme",
],
},
},
]
MIDDLEWARE_CLASSES = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.auth.middleware.SessionAuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sites.middleware.CurrentSiteMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
ROOT_URLCONF = "bojv4.urls"
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "bojv4.wsgi.application"
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
# theme
"bootstrapform",
"pinax_theme_bootstrap",
"bootstrap3",
# external
"account",
"pinax.eventlog",
"pinax.webanalytics",
"guardian",
"rest_framework",
# "bootstrap_pagination",
"avatar",
"easy_thumbnails",
"filer",
"mptt",
"django_select2",
"django_tables2",
"django_filters",
"debug_toolbar",
"crispy_forms",
# project
"bojv4",
"ojuser",
"problem",
"submission",
"common",
"contest",
"cheat",
"announcement",
]
# Logging configuration. Sends an email to the site admins on every HTTP 500
# error when DEBUG=False, and writes rotating log files for general, warning,
# error and judge output under /var/log/oj/.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse"
}
},
'formatters': {
'standard': {
'format': '%(asctime)s [%(threadName)s:%(thread)d] [%(name)s:%(lineno)d] [%(levelname)s]- %(message)s'
},
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler"
},
'default': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': '/var/log/oj/all.log',
'maxBytes': 1024*1024*5,
'backupCount': 5,
'formatter' :'standard',
},
'error_handler': {
'level': 'ERROR',
'class': 'logging.handlers.RotatingFileHandler',
'filename': '/var/log/oj/error.log',
'maxBytes': 1024*1024*5,
'backupCount': 5,
'formatter':'standard',
},
'warning_handler': {
'level': 'WARNING',
'class': 'logging.handlers.RotatingFileHandler',
'filename': '/var/log/oj/warning.log',
'maxBytes': 1024*1024*5,
'backupCount': 5,
'formatter': 'standard',
},
'judge_handler': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': '/var/log/oj/judge.log',
'maxBytes': 1024*1024*5,
'backupCount': 5,
'formatter': 'standard',
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django": {
"handlers": ["default", "warning_handler", "error_handler"],
"level": "INFO",
"propagate": True,
},
"judge": {
"handlers": ["judge_handler"],
"level": "INFO",
"propagate": True,
}
}
}
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
# ==================
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_EMAIL_UNIQUE = True
ACCOUNT_EMAIL_CONFIRMATION_REQUIRED = False
ACCOUNT_LOGIN_REDIRECT_URL = "/"
ACCOUNT_LOGOUT_REDIRECT_URL = "/"
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 2
ACCOUNT_USE_AUTH_AUTHENTICATE = True
# ==================
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"account.auth_backends.UsernameAuthenticationBackend",
"guardian.backends.ObjectPermissionBackend",
# "filer.server.backends.default.DefaultServer",
]
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': '127.0.0.1:6379',
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
},
},
}
REDIS_TIMEOUT = 24 * 60 * 60              # 1 day
CUBES_REDIS_TIMEOUT = 60 * 60             # 1 hour
NEVER_REDIS_TIMEOUT = 365 * 24 * 60 * 60  # 1 year
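# Illustrative usage of the Redis cache configured above with one of these
# timeout constants (the key name is hypothetical):
#
#     from django.core.cache import cache
#     cache.set("contest:ranklist", ranklist, CUBES_REDIS_TIMEOUT)
#     ranklist = cache.get("contest:ranklist")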
if DEBUG:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
else:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
# ==============================================
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
# 'easy_thumbnails.processors.scale_and_crop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters',
)
FILER_ENABLE_PERMISSIONS = True
FILER_CANONICAL_URL = 'sharing/'
# ==============================================
DEBUG_TOOLBAR_PATCH_SETTINGS = False
INTERNAL_IPS = ["10.105.243.4", "10.205.242.83", "127.0.0.1"]
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for Service offerings"""
# Import Local Modules
from marvin.codes import FAILED
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackAPI import (changeServiceForVirtualMachine,
updateServiceOffering)
from marvin.lib.utils import (isAlmostEqual,
cleanup_resources,
random_gen)
from marvin.lib.base import (ServiceOffering,
Account,
VirtualMachine)
from marvin.lib.common import (list_service_offering,
list_virtual_machines,
get_domain,
get_zone,
get_template,
list_hosts)
from nose.plugins.attrib import attr
import time
from marvin.sshClient import SshClient
from marvin.lib.decoratorGenerators import skipTestIf
_multiprocess_shared_ = True
class TestCreateServiceOffering(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
self.services = self.testClient.getParsedTestDataConfig()
def tearDown(self):
try:
# Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"advanced",
"advancedns",
"smoke",
"basic",
"eip",
"sg"],
required_hardware="false")
def test_01_create_service_offering(self):
"""Test to create service offering"""
# Validate the following:
# 1. createServiceOfferings should return a valid information
# for newly created offering
# 2. The Cloud Database contains the valid information
service_offering = ServiceOffering.create(
self.apiclient,
self.services["service_offerings"]["tiny"]
)
self.cleanup.append(service_offering)
self.debug(
"Created service offering with ID: %s" %
service_offering.id)
list_service_response = list_service_offering(
self.apiclient,
id=service_offering.id
)
self.assertEqual(
isinstance(list_service_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_service_response),
0,
"Check Service offering is created"
)
self.assertEqual(
list_service_response[0].cpunumber,
self.services["service_offerings"]["tiny"]["cpunumber"],
"Check server id in createServiceOffering"
)
self.assertEqual(
list_service_response[0].cpuspeed,
self.services["service_offerings"]["tiny"]["cpuspeed"],
"Check cpuspeed in createServiceOffering"
)
self.assertEqual(
list_service_response[0].displaytext,
self.services["service_offerings"]["tiny"]["displaytext"],
"Check server displaytext in createServiceOfferings"
)
self.assertEqual(
list_service_response[0].memory,
self.services["service_offerings"]["tiny"]["memory"],
"Check memory in createServiceOffering"
)
self.assertEqual(
list_service_response[0].name,
self.services["service_offerings"]["tiny"]["name"],
"Check name in createServiceOffering"
)
return
class TestServiceOfferings(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
try:
# Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
testClient = super(TestServiceOfferings, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
cls.hypervisor = testClient.getHypervisorInfo()
domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls.service_offering_1 = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["tiny"]
)
cls.service_offering_2 = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["tiny"]
)
template = get_template(
cls.apiclient,
cls.zone.id,
cls.hypervisor
)
if template == FAILED:
assert False, "get_template() failed to return template"
# Set Zones and disk offerings
cls.services["small"]["zoneid"] = cls.zone.id
cls.services["small"]["template"] = template.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=domain.id
)
cls.small_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["small"]
)
cls.medium_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["medium"]
)
cls.medium_virtual_machine = VirtualMachine.create(
cls.apiclient,
cls.services["small"],
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.medium_offering.id,
mode=cls.services["mode"]
)
cls._cleanup = [
cls.small_offering,
cls.medium_offering,
cls.account
]
return
@classmethod
def tearDownClass(cls):
try:
cls.apiclient = super(
TestServiceOfferings,
cls).getClsTestClient().getApiClient()
# Clean up, terminate the created templates
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"advanced",
"advancedns",
"smoke",
"basic",
"eip",
"sg"],
required_hardware="false")
def test_02_edit_service_offering(self):
"""Test to update existing service offering"""
# Validate the following:
# 1. updateServiceOffering should return
# a valid information for newly created offering
# Generate new name & displaytext from random data
random_displaytext = random_gen()
random_name = random_gen()
self.debug("Updating service offering with ID: %s" %
self.service_offering_1.id)
cmd = updateServiceOffering.updateServiceOfferingCmd()
# Add parameters for API call
cmd.id = self.service_offering_1.id
cmd.displaytext = random_displaytext
cmd.name = random_name
self.apiclient.updateServiceOffering(cmd)
list_service_response = list_service_offering(
self.apiclient,
id=self.service_offering_1.id
)
self.assertEqual(
isinstance(list_service_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_service_response),
0,
"Check Service offering is updated"
)
self.assertEqual(
list_service_response[0].displaytext,
random_displaytext,
"Check server displaytext in updateServiceOffering"
)
self.assertEqual(
list_service_response[0].name,
random_name,
"Check server name in updateServiceOffering"
)
return
@attr(
tags=[
"advanced",
"advancedns",
"smoke",
"basic",
"eip",
"sg"],
required_hardware="false")
def test_03_delete_service_offering(self):
"""Test to delete service offering"""
# Validate the following:
# 1. deleteServiceOffering should return
# a valid information for newly created offering
self.debug("Deleting service offering with ID: %s" %
self.service_offering_2.id)
self.service_offering_2.delete(self.apiclient)
list_service_response = list_service_offering(
self.apiclient,
id=self.service_offering_2.id
)
self.assertEqual(
list_service_response,
None,
"Check if service offering exists in listDiskOfferings"
)
return
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_04_change_offering_small(self):
"""Test to change service to a small capacity
"""
# Validate the following
# 1. Log in to the Vm .We should see that the CPU and memory Info of
# this Vm matches the one specified for "Small" service offering.
# 2. Using listVM command verify that this Vm
# has Small service offering Id.
if self.hypervisor.lower() == "lxc":
self.skipTest("Skipping this test for {} due to bug CS-38153".format(self.hypervisor))
try:
self.medium_virtual_machine.stop(self.apiclient)
except Exception as e:
self.fail("Failed to stop VM: %s" % e)
cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd()
cmd.id = self.medium_virtual_machine.id
cmd.serviceofferingid = self.small_offering.id
self.apiclient.changeServiceForVirtualMachine(cmd)
self.debug("Starting VM - ID: %s" % self.medium_virtual_machine.id)
self.medium_virtual_machine.start(self.apiclient)
# Ensure that VM is in running state
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.medium_virtual_machine.id
)
if isinstance(list_vm_response, list):
vm = list_vm_response[0]
if vm.state == 'Running':
self.debug("VM state: %s" % vm.state)
else:
raise Exception(
"Failed to start VM (ID: %s) after changing\
service offering" % vm.id)
try:
ssh = self.medium_virtual_machine.get_ssh_client()
except Exception as e:
self.fail(
"SSH Access failed for %s: %s" %
(self.medium_virtual_machine.ipaddress, e)
)
cpuinfo = ssh.execute("cat /proc/cpuinfo")
cpu_cnt = len([i for i in cpuinfo if "processor" in i])
# 'cpu MHz\t\t: 2660.499'
cpu_speed = [i for i in cpuinfo if "cpu MHz" in i][0].split()[3]
meminfo = ssh.execute("cat /proc/meminfo")
# MemTotal: 1017464 kB
total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1]
self.debug(
"CPU count: %s, CPU Speed: %s, Mem Info: %s" % (
cpu_cnt,
cpu_speed,
total_mem
))
self.assertAlmostEqual(
int(cpu_cnt),
self.small_offering.cpunumber,
"Check CPU Count for small offering"
)
self.assertAlmostEqual(
list_vm_response[0].cpuspeed,
self.small_offering.cpuspeed,
"Check CPU Speed for small offering"
)
range = 25
if self.hypervisor.lower() == "hyperv":
range = 200
# TODO: Find the memory allocated to VM on hyperv hypervisor using
# powershell commands and use that value to equate instead of
# manipulating range, currently we get the memory count much less
# because of the UI component
self.assertTrue(
isAlmostEqual(int(int(total_mem) / 1024),
int(self.small_offering.memory),
range=range
),
"Check Memory(kb) for small offering"
)
return
class TestCpuCapServiceOfferings(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
try:
# Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def get_ssh_client(self, id, public_ip, username, password, retries):
""" Setup ssh client connection and return connection
vm requires attributes public_ip, public_port, username, password """
try:
ssh_client = SshClient(
public_ip,
22,
username,
password,
retries)
except Exception as e:
self.fail("Unable to create ssh connection: " % e)
self.assertIsNotNone(
ssh_client, "Failed to setup ssh connection to host=%s on public_ip=%s" % (id, public_ip))
return ssh_client
@classmethod
def setUpClass(cls):
testClient = super(TestCpuCapServiceOfferings, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
cls.hypervisor = testClient.getHypervisorInfo()
cls._cleanup = []
cls.hypervisorNotSupported = False
if cls.hypervisor.lower() not in ["kvm"]:
cls.hypervisorNotSupported = True
return
domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
template = get_template(cls.apiclient, cls.zone.id, cls.hypervisor)
if template == FAILED:
assert False, "get_template() failed to return template"
cls.services["small"]["zoneid"] = cls.zone.id
cls.services["small"]["template"] = template.id
cls.services["small"]["hypervisor"] = cls.hypervisor
cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=domain.id
)
offering_data = {
'displaytext': 'TestOffering',
'cpuspeed': 512,
'cpunumber': 2,
'name': 'TestOffering',
'memory': 1024
}
cls.offering = ServiceOffering.create(
cls.apiclient,
offering_data,
limitcpuuse=True
)
def getHost(self, hostId=None):
response = list_hosts(
self.apiclient,
type='Routing',
hypervisor='kvm',
id=hostId
)
        # Check that at least one KVM host is available for this test
        if response and len(response) > 0:
            self.host = response[0]
            return self.host
        raise self.skipTest("Not enough KVM hosts found, skipping the test")
cls.host = getHost(cls)
cls.vm = VirtualMachine.create(
cls.apiclient,
cls.services["small"],
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.offering.id,
mode=cls.services["mode"],
hostid=cls.host.id
)
cls._cleanup = [
cls.offering,
cls.account
]
@classmethod
def tearDownClass(cls):
try:
cls.apiclient = super(
TestCpuCapServiceOfferings,
cls).getClsTestClient().getApiClient()
# Clean up, terminate the created templates
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@skipTestIf("hypervisorNotSupported")
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_01_service_offering_cpu_limit_use(self):
"""
Test CPU Cap on KVM
"""
ssh_host = self.get_ssh_client(self.host.id, self.host.ipaddress, self.hostConfig["username"], self.hostConfig["password"], 10)
#Get host CPU usage from top command before and after VM consuming 100% CPU
find_pid_cmd = "ps -ax | grep '%s' | head -1 | awk '{print $1}'" % self.vm.id
pid = ssh_host.execute(find_pid_cmd)[0]
cpu_usage_cmd = "top -b n 1 p %s | tail -1 | awk '{print $9}'" % pid
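        # Parse the %CPU column (9th field of the last line of the batch-mode
        # `top` output) for the VM's qemu process; the value is read before
        # and after pinning a guest vCPU at 100%.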
host_cpu_usage_before_str = ssh_host.execute(cpu_usage_cmd)[0]
host_cpu_usage_before = round(float(host_cpu_usage_before_str))
self.debug("Host CPU usage before the infinite loop on the VM: " + str(host_cpu_usage_before))
#Execute loop command in background on the VM
ssh_vm = self.vm.get_ssh_client(reconnect=True)
ssh_vm.execute("echo 'while true; do x=$(($x+1)); done' > cputest.sh")
ssh_vm.execute("sh cputest.sh > /dev/null 2>&1 &")
time.sleep(5)
host_cpu_usage_after_str = ssh_host.execute(cpu_usage_cmd)[0]
host_cpu_usage_after = round(float(host_cpu_usage_after_str))
self.debug("Host CPU usage after the infinite loop on the VM: " + str(host_cpu_usage_after))
limit = 95
        self.assertTrue(host_cpu_usage_after < limit,
                        "Host CPU usage exceeded the cap after maxing out the VM CPU")
return
|
|
#!/usr/bin/env python
import os
import re
import yaml
import threading
import itertools
import rospy
import rospkg
import roslaunch
from launchtree_loader import LaunchtreeLoader
from launchtree_config import LaunchtreeConfig, LaunchtreeArg, LaunchtreeRemap, LaunchtreeParam, LaunchtreeRosparam
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Qt, Signal
from python_qt_binding.QtWidgets import QFileDialog, QWidget, QTreeWidgetItem
from python_qt_binding.QtGui import QIcon, QColor
class LaunchtreeEntryItem(QTreeWidgetItem):
_type_order = [dict, roslaunch.core.Node, LaunchtreeRosparam, roslaunch.core.Param, LaunchtreeRemap, LaunchtreeArg, object]
#inconsistent = False
def __init__(self, *args, **kw ):
super(LaunchtreeEntryItem, self).__init__(*args, **kw)
self.inconsistent = False
def __ge__(self, other):
own_type_idx = map(lambda t: isinstance(self.instance, t), self._type_order).index(True)
other_type_idx = map(lambda t: isinstance(other.instance, t), self._type_order).index(True)
if own_type_idx != other_type_idx:
return own_type_idx >= other_type_idx
return self.text(0) >= other.text(0)
def __lt__(self, other):
return not self.__ge__(other)
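# Note (added comment): the comparison operators above define the sort order
# for tree entries: items are grouped by type following _type_order (includes
# first, then nodes, rosparam loads, params, remaps, args) and ordered
# alphabetically within each group.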
class LaunchtreeWidget(QWidget):
update_launch_view = Signal(object)
display_load_error = Signal(str, str)
def __init__(self, context):
super(LaunchtreeWidget, self).__init__()
self._rp = rospkg.RosPack()
self._rp_package_list = self._rp.list()
res_folder = os.path.join(self._rp.get_path('rqt_launch_editor'), 'resource')
ui_file = os.path.join(res_folder, 'editor_widget.ui')
loadUi(ui_file, self)
self._block_load = True
self.editor = 'gedit' # configure via settings
self.setObjectName('EditorWidget')
#self.reload_button.setIcon(QIcon.fromTheme('view-refresh'))
self._properties_empty_ui = os.path.join(res_folder, 'properties_empty.ui')
self._properties_param_ui = os.path.join(res_folder, 'properties_param.ui')
self._icon_include = QIcon(os.path.join(res_folder, 'img/include.png'))
self._icon_node = QIcon(os.path.join(res_folder, 'img/node.png'))
self._icon_param = QIcon(os.path.join(res_folder, 'img/param.png'))
self._icon_arg = QIcon(os.path.join(res_folder, 'img/arg.png'))
self._icon_remap = QIcon(os.path.join(res_folder, 'img/remap.png'))
self._icon_rosparam = QIcon(os.path.join(res_folder, 'img/rosparam_load.png'))
self._icon_default = QIcon(os.path.join(res_folder, 'img/default.png'))
self._icon_warn = QIcon(os.path.join(res_folder, 'img/warn.png'))
self._launch_separator = ' -- '
self._highlight_color = QColor(255, 255, 150)
self._neutral_color = QColor(255, 255, 255, 0)
# connect signals
self.update_launch_view.connect(self._update_launch_view)
self.display_load_error.connect(self._display_load_error)
self.package_select.currentIndexChanged.connect(self.update_launchfiles)
self.launchfile_select.currentIndexChanged.connect(lambda idx: self.load_launchfile())
#self.reload_button.clicked.connect(self.load_launchfile)
self.open_button.clicked.connect(self._root_open_clicked)
self.launch_view.currentItemChanged.connect(self.launch_entry_changed)
"""self.filter_nodes.toggled.connect(lambda t: self._filter_launch_view())
self.filter_params.toggled.connect(lambda t: self._filter_launch_view())
self.filter_args.toggled.connect(lambda t: self._filter_launch_view())
self.filter_remaps.toggled.connect(lambda t: self._filter_launch_view())
self.filter_empty.toggled.connect(lambda t: self._filter_launch_view())
self.search_input.textChanged.connect(lambda t: self._filter_launch_view(collapse=t==''))"""
#self.launch_open_button.clicked.connect(self._launch_open_clicked)
self.reset()
def reset(self):
self._launch_config = LaunchtreeConfig()
self._package_list = list()
self._load_thread = None
#self.properties_content.setCurrentIndex(0)
self.main_view.setCurrentIndex(0)
self.update_package_list()
def block_load(self, do_block):
self._block_load = do_block
def load_launchfile(self):
if self._block_load: return
self.launch_view.clear()
#self.properties_content.setCurrentIndex(0)
self.main_view.setCurrentIndex(0)
filename = os.path.join(
self._rp.get_path(self.package_select.currentText()),
self.launchfile_select.currentText()
)
#launchargs = roslaunch.substitution_args.resolve_args(self.args_input.text()).split(' ')
if os.path.isfile(filename):
self.progress_bar.setRange(0,0)
self._load_thread = threading.Thread(target=self._load_launch_items, args=[filename, []])
self._load_thread.daemon = True
self._load_thread.start()
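            # The launch file is parsed in a daemon thread so the Qt event
            # loop stays responsive; results are handed back to the GUI thread
            # via the update_launch_view / display_load_error signals emitted
            # in _load_launch_items.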
def _load_launch_items(self, filename, launchargs):
self._launch_config = LaunchtreeConfig()
items = list()
try:
#delete next 2 lines and may be able to delete loader dependency?
#loader = LaunchtreeLoader()
#loader.load(filename, self._launch_config, verbose=False, argv=['','',''] + launchargs)
items = self.display_config_tree(self._launch_config.tree)
self.update_launch_view.emit(items)
except Exception as e:
error_msg = re.sub(r'(\[?(?:/\w+)+\.launch\]?)',
lambda m: '[%s]'%self._filename_to_label(m.group(0)),
str(e)
)
help_msg = ''
#if 'arg to be set' in str(e):
# help_msg = 'You can pass args to the root launch file by specifying them in the "args" input field, for example "arg_key:=arg_value".'
self.display_load_error.emit(error_msg, help_msg)
def display_config_tree(self, config_tree):
items = list()
for key, instance in config_tree.items():
if key == '_root': continue
i = LaunchtreeEntryItem()
i.instance = instance
if isinstance(i.instance, roslaunch.core.Param):
i.inconsistent = i.instance.inconsistent
if isinstance(instance, dict):
childItems = self.display_config_tree(instance)
i.inconsistent = any(c.inconsistent for c in childItems)
i.addChildren(childItems)
i.instance = instance.get('_root', instance)
if isinstance(i.instance, dict):
i.setText(0, self._filename_to_label(key.split(':')[0]))
i.setIcon(0, self._icon_include if not i.inconsistent else self._icon_warn)
else:
i.setText(0, self._filename_to_label(key.split(':')[0]) if isinstance(i.instance, LaunchtreeRosparam) else
key.split(':')[0])
i.setIcon(0,
self._icon_warn if i.inconsistent else
self._icon_node if isinstance(i.instance, roslaunch.core.Node) else
self._icon_param if isinstance(i.instance, roslaunch.core.Param) else
self._icon_arg if isinstance(i.instance, LaunchtreeArg) else
self._icon_remap if isinstance(i.instance, LaunchtreeRemap) else
self._icon_rosparam if isinstance(i.instance, LaunchtreeRosparam) else
self._icon_default)
items.append(i)
return items
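    # Note (added comment): display_config_tree above recurses over the nested
    # dict built by LaunchtreeConfig; a child's `inconsistent` flag is
    # propagated to its parent include entry so a warning icon is shown at
    # every level that contains a conflicting parameter value.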
def _display_load_error(self, error_msg, help_msg):
self.error_label.setText(error_msg)
self.help_label.setText(help_msg)
self.main_view.setCurrentIndex(1)
def _update_launch_view(self, items):
self.launch_view.clear()
self.launch_view.addTopLevelItems(items)
#self.launch_view.sortItems(0, Qt.AscendingOrder)
#self._filter_launch_view()
self.progress_bar.setRange(0,1)
self.progress_bar.setValue(1)
self._load_thread = None
def update_package_list(self):
self._package_list = sorted(
filter(lambda p: len(self._get_launch_files(self._rp.get_path(p)))>0,
self._rp_package_list
)
)
self.package_select.clear()
self.package_select.addItems(self._package_list)
self.package_select.setCurrentIndex(0)
def update_launchfiles(self, idx):
package = self.package_select.itemText(idx)
folder = self._rp.get_path(package)
launchfiles = self._get_launch_files(folder)
self.launchfile_select.clear()
self.launchfile_select.addItems(launchfiles)
def _get_launch_files(self, path):
return sorted(
itertools.imap(lambda p: p.replace(path + '/', ''),
itertools.ifilter(self._is_launch_file,
itertools.chain.from_iterable(
itertools.imap(lambda f:
map(lambda n: os.path.join(f[0], n), f[2]),
os.walk(path)
)
)
)
)
)
def _is_launch_file(self, path):
if not os.path.isfile(path): return False
(root, ext) = os.path.splitext(path)
if ext != '.launch': return False
return True
def launch_entry_changed(self, current, previous):
#clear properties
if current is None:
return
data = current.instance
if isinstance(data, dict) and data.has_key('_root'):
data = data['_root']
if isinstance(data, roslaunch.core.Param):
self.properties_content.setCurrentIndex(1)
self.param_name.setText(data.key.split('/')[-1] + ':')
if isinstance(data.value, list):
self.param_value_list.clear()
self.param_value_list.addItems(list(str(v) for v in data.value))
self.param_value_panel.setCurrentIndex(2)
elif len(str(data.value)) < 100:
self.param_value.setText(str(data.value))
self.param_value_panel.setCurrentIndex(0)
else:
self.param_value_long.setPlainText(str(data.value))
self.param_value_panel.setCurrentIndex(1)
elif isinstance(data, roslaunch.core.Node):
self.properties_content.setCurrentIndex(2)
self.node_package.setText(data.package)
self.node_type.setText(data.type)
self.node_namespace.setText(str(data.namespace))
self.node_args.setText(str(data.args))
self.node_args.setEnabled(data.args != '')
self.node_prefix.setText(str(data.launch_prefix) if data.launch_prefix is not None else '')
self.node_prefix.setEnabled(data.launch_prefix is not None)
self.node_machine.setText(str(data.machine_name) if data.machine_name is not None else '')
self.node_machine.setEnabled(data.machine_name is not None)
elif isinstance(data, LaunchtreeArg):
self.properties_content.setCurrentIndex(4)
self.arg_name.setText(data.name)
self.arg_value.setText(str(data.value) if data.value is not None else '')
self.arg_default.setText(str(data.default) if data.default is not None else '')
self.arg_doc.setText(str(data.doc) if data.doc is not None else '')
self.arg_value.setEnabled(data.value is not None)
self.arg_default.setEnabled(not self.arg_value.isEnabled())
elif isinstance(data, LaunchtreeRemap):
self.properties_content.setCurrentIndex(5)
self.remap_from.setText(data.from_topic)
self.remap_to.setText(data.to_topic)
elif isinstance(data, roslaunch.core.Machine):
self.properties_content.setCurrentIndex(6)
self.machine_address.setText(str(data.address))
self.machine_port.setText(str(data.ssh_port))
self.machine_user.setText(str(data.user) if data.user is not None else '')
self.machine_user.setEnabled(data.user is not None)
self.machine_loader.setText(str(data.env_loader) if data.env_loader is not None else '')
self.machine_loader.setEnabled(data.env_loader is not None)
elif isinstance(data, LaunchtreeRosparam):
self.properties_content.setCurrentIndex(3)
path_segments = self.launch_view.currentItem().text(0).split(self._launch_separator)
if len(path_segments) == 2:
(p, l) = path_segments
(d, f) = os.path.split(l)
else:
p = None
f = path_segments[0]
self.file_package.setText(p if p is not None else '')
self.file_package.setEnabled(p is not None)
self.file_name.setText(f)
elif isinstance(data, dict):
self.properties_content.setCurrentIndex(3)
(p, l) = self.launch_view.currentItem().text(0).split(self._launch_separator)
(d, f) = os.path.split(l)
self.file_package.setText(p)
self.file_name.setText(f)
else:
self.properties_content.setCurrentIndex(0)
def _filter_launch_view(self, collapse=False):
show_nodes = self.filter_nodes.isChecked()
show_params = self.filter_params.isChecked()
show_args = self.filter_args.isChecked()
show_remaps = self.filter_remaps.isChecked()
show_empty = self.filter_empty.isChecked()
search_text = self.search_input.text()
highlight = search_text != ''
expand = not collapse and highlight
def filter_launch_entry(entry):
show = False
# param
if isinstance(entry.instance, roslaunch.core.Param):
show = show_params
# node
elif isinstance(entry.instance, roslaunch.core.Node):
show = show_nodes
# machine (no separate option to display machines, is coupled to nodes)
elif isinstance(entry.instance, roslaunch.core.Machine):
show = show_nodes
# arg
elif isinstance(entry.instance, LaunchtreeArg):
show = show_args
# remap
elif isinstance(entry.instance, LaunchtreeRemap):
show = show_remaps
show &= search_text in entry.text(0)
if show:
entry.setBackground(0, self._highlight_color if highlight else self._neutral_color)
if entry.childCount() > 0:
not_empty = any(map(filter_launch_entry, map(entry.child, range(entry.childCount()))))
show |= show_empty or not_empty
entry.setExpanded(not collapse and (expand or entry.isExpanded()))
entry.setHidden(not show)
return show
for idx in range(self.launch_view.topLevelItemCount()):
filter_launch_entry(self.launch_view.topLevelItem(idx))
def _launch_open_clicked(self):
(p, l) = self.launch_view.currentItem().text(0).split(self._launch_separator)
filename = os.path.join(self._rp.get_path(p), l)
thread = threading.Thread(target=os.system, args=['%s %s' % (self.editor, filename)])
thread.daemon = True
thread.start()
def _root_open_clicked(self):
filename = os.path.join(
self._rp.get_path(self.package_select.currentText()),
self.launchfile_select.currentText()
)
thread = threading.Thread(target=os.system, args=['%s %s' % (self.editor, filename)])
thread.daemon = True
thread.start()
def shutdown(self):
pass
def _filename_to_label(self, filename):
tail = list()
for d in reversed(filename.split('/')):
if d in self._rp_package_list:
return '%s%s%s' % (d, self._launch_separator, '/'.join(reversed(tail)))
else:
tail.append(d)
return filename
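    # Example (informal sketch, names are hypothetical): with 'my_pkg' in the
    # package list, _filename_to_label('/ws/src/my_pkg/launch/demo.launch')
    # returns 'my_pkg -- launch/demo.launch', i.e. the package name, the
    # ' -- ' separator, and the path relative to the package directory.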
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmarks for low-level eager execution primitives.
To run CPU benchmarks:
bazel run -c opt benchmarks_test -- --benchmarks=.
To run GPU benchmarks:
bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
--benchmarks=.
To run a subset of benchmarks, use the --benchmarks flag:
--benchmarks: the list of benchmarks to run. The specified value is interpreted
as a regular expression and any benchmark whose name contains a partial match
to the regular expression is executed.
e.g. --benchmarks=".*matmul.*" will run all matmul related benchmarks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import benchmarks_test_base
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import forwardprop
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
GLOBAL_TEST_VALUE = None
def c_tfe_py_fastpath_execute(a,
b,
transpose_a=False,
transpose_b=False,
name=None):
ctx = context.context()
assert ctx.executing_eagerly(
), "The prototype doesn't contain C code for graph construction"
try:
return pywrap_tfe.TFE_Py_FastPathExecute(ctx,
"MatMul", name,
a, b, "transpose_a", transpose_a,
"transpose_b", transpose_b)
except core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
six.raise_from(core._status_to_exception(e.code, message), None)
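# Note (added comment): c_tfe_py_fastpath_execute above invokes the eager
# fast-path C API (pywrap_tfe.TFE_Py_FastPathExecute) directly rather than
# going through math_ops.matmul, so the corresponding benchmarks isolate the
# overhead of the generated Python op wrappers.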
def run_benchmark(func, num_iters, execution_mode=None):
ctx = context.context()
with context.execution_mode(execution_mode):
# call func to warm up
func()
if execution_mode == context.ASYNC:
ctx.executor.wait()
start = time.time()
for _ in xrange(num_iters):
func()
if execution_mode == context.ASYNC:
ctx.executor.wait()
end = time.time()
return end - start
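# Example (hedged sketch, not part of the original benchmarks): run_benchmark
# times a closure directly; `matmul_2x2` below is a hypothetical helper.
#
#   m = random_ops.random_uniform((2, 2))
#   matmul_2x2 = lambda: math_ops.matmul(m, m)
#   total_seconds = run_benchmark(matmul_2x2, num_iters=1000)
#   print("mean per-iteration time:", total_seconds / 1000)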
class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
def __init__(self):
# used for multiply benchmarks
self._m_2 = random_ops.random_uniform([2])
# used for matmul benchmarks
self._m_2_by_2 = random_ops.random_uniform((2, 2))
self._m_100_by_784 = random_ops.random_uniform((100, 784))
self._num_iters_2_by_2 = 30000
self._num_iters_100_by_784 = 30000
# used for conv2d benchmarks
self._m_8_28_28_3 = random_ops.random_uniform((8, 28, 28, 3))
self._m_1_3_3_1 = random_ops.random_uniform((1, 3, 3, 1))
def _get_benchmark_name(self):
"""Mostly copied from benchmark.py _get_name()."""
stack = tf_inspect.stack()
name = None
for frame in stack[::-1]:
f_locals = frame[0].f_locals
f_self = f_locals.get("self", None)
if isinstance(f_self, test.Benchmark):
name = frame[3] # Get the method name
# This is a hack to get around the fact that some methods might have a
# disable_tfrt decorator around them. In that case a function called
# 'decorated' wraps the real called function underneath and so we
# peek one deeper into the stack to get the real name.
if name == "decorated":
continue
else:
break
if name is None:
raise ValueError("Unable to determine calling Benchmark function.")
if context.is_tfrt_enabled():
name = name + "_tfrt"
return name
def _run(self, func, num_iters, execution_mode=None):
self.run_report(run_benchmark, func, num_iters, execution_mode)
def benchmark_create_np_array(self):
func = lambda: np.array([3.0])
self._run(func, 30000)
def _benchmark_create_tensor(self, value, dtype, device):
"""Benchmark overheads of creating a Tensor object."""
if device == GPU:
# Warmup the GPU
ops.EagerTensor(value, device=device)
def func():
ops.EagerTensor(value, device=device, dtype=dtype)
self._run(func, 30000)
def _benchmark_create_constant(self, value, dtype, cached=True):
global GLOBAL_TEST_VALUE
GLOBAL_TEST_VALUE = value
def cached_func():
constant_op.constant(value, dtype=dtype)
def uncached_func():
global GLOBAL_TEST_VALUE
GLOBAL_TEST_VALUE += 1
constant_op.constant(GLOBAL_TEST_VALUE, dtype=dtype)
func = cached_func if cached else uncached_func
with ops.device("GPU:0" if context.num_gpus() else "CPU:0"):
for _ in range(1000):
func() # Warmup.
self._run(func, 3000)
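  # Note (added comment): in _benchmark_create_constant above, the "uncached"
  # variant increments GLOBAL_TEST_VALUE on every call so constant_op.constant
  # receives a new Python value each time and cannot reuse a previously
  # created constant, unlike the "cached" variant which passes the same value
  # on every call.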
def benchmark_create_float_constant(self):
self._benchmark_create_constant(42.0, dtype=None)
def benchmark_create_float_constant_uncached(self):
self._benchmark_create_constant(42.0, dtype=None, cached=False)
def benchmark_create_int32_constant(self):
if context.num_gpus():
return # int32 constants are always allocated on CPU.
self._benchmark_create_constant(42, dtype=dtypes.int32)
def benchmark_create_int32_constant_uncached(self):
if context.num_gpus():
return # int32 constants are always allocated on CPU.
self._benchmark_create_constant(42, dtype=dtypes.int32, cached=False)
def _benchmark_add(self, a, b):
def func():
return memoryview(math_ops.add_v2(a, b))
with ops.device("GPU:0" if context.num_gpus() else "CPU:0"):
for _ in range(1000):
func() # Warmup.
self._run(func, 30000)
def _benchmark_add_operator_overload(self, a, b):
def func():
return memoryview(a + b)
with ops.device("GPU:0" if context.num_gpus() else "CPU:0"):
for _ in range(1000):
func() # Warmup.
self._run(func, 30000)
def benchmark_add_float_scalars(self):
self._benchmark_add(42.0, 24.0)
def benchmark_add_int32_scalars(self):
self._benchmark_add(42, 24)
def benchmark_add_float_scalar_tensor(self):
tensor_a = constant_op.constant(42.0)
tensor_b = constant_op.constant(24.0)
self._benchmark_add(tensor_a, tensor_b)
def benchmark_add_float_scalar_tensor_overloaded_operator(self):
tensor_a = constant_op.constant(42.0)
tensor_b = constant_op.constant(24.0)
self._benchmark_add_operator_overload(tensor_a, tensor_b)
def benchmark_add_int32_scalar_tensor(self):
tensor_a = constant_op.constant(42)
tensor_b = constant_op.constant(24)
self._benchmark_add(tensor_a, tensor_b)
def benchmark_add_float_dense_tensor(self):
tensor_a = constant_op.constant([[42.0, 42.0], [42.0, 42.0]])
tensor_b = constant_op.constant([[24.0, 24.0], [24.0, 24.0]])
self._benchmark_add(tensor_a, tensor_b)
def benchmark_add_int32_dense_tensor(self):
tensor_a = constant_op.constant([[42, 42], [42, 42]])
tensor_b = constant_op.constant([[24, 24], [24, 24]])
self._benchmark_add(tensor_a, tensor_b)
def benchmark_create_float_tensor_from_list_CPU(self):
self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, CPU)
def benchmark_create_float_tensor_from_np_array_CPU(self):
self._benchmark_create_tensor(
np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
CPU)
def benchmark_create_int32_tensor_from_list_CPU(self):
self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, CPU)
def benchmark_create_int32_tensor_from_np_array_CPU(self):
self._benchmark_create_tensor(
np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, CPU)
def benchmark_create_float_tensor_from_list_GPU(self):
if not context.num_gpus():
return
self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)
def benchmark_create_float_tensor_from_np_array_GPU(self):
if not context.num_gpus():
return
self._benchmark_create_tensor(
np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
GPU)
def benchmark_create_int32_tensor_from_list_GPU(self):
# int32's are kept on host memory even when executing on GPU.
if not context.num_gpus():
return
self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)
def benchmark_create_int32_tensor_from_np_array_GPU(self):
# int32's are kept on host memory even when executing on GPU.
if not context.num_gpus():
return
self._benchmark_create_tensor(
np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU)
def benchmark_index_tensor_with_literal(self):
func = lambda: constant_op.constant([3.0])[0]
self._run(func, 30000)
def benchmark_index_tensor_with_tensor(self):
func = lambda idx=constant_op.constant(0): constant_op.constant([3.0])[idx]
self._run(func, 30000)
def benchmark_index_tensor_with_np_array(self):
func = lambda idx=np.array(0): constant_op.constant([3.0])[idx]
self._run(func, 30000)
def _benchmark_np_multiply(self, m, num_iters):
a = m.cpu().numpy()
func = lambda: a * a
self._run(func, num_iters)
def _benchmark_tf_multiply(self, m, num_iters):
func = lambda: m * m
self._run(func, num_iters)
def _benchmark_tf_conv2d(self, m1, m2, num_iters):
func = lambda: nn_ops.conv2d(m1, m2, strides=[1, 1, 1, 1], padding="VALID")
self._run(func, num_iters)
def _benchmark_tf_multiply_op(self, m, num_iters):
func = lambda: math_ops.multiply(m, m)
self._run(func, num_iters)
def benchmark_np_multiply(self):
self._benchmark_np_multiply(self._m_2, 30000)
def benchmark_tf_multiply_CPU(self):
with context.device(CPU):
m = self._m_2.cpu()
self._benchmark_tf_multiply(m, 30000)
def benchmark_tf_multiply_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2.gpu()
self._benchmark_tf_multiply(m, 30000)
def benchmark_tf_multiply_op_CPU(self):
with context.device(CPU):
m = self._m_2.cpu()
self._benchmark_tf_multiply_op(m, 30000)
def benchmark_tf_multiply_op_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2.gpu()
self._benchmark_tf_multiply_op(m, 30000)
def benchmark_tf_conv2d_CPU(self):
with context.device(CPU):
m1 = self._m_8_28_28_3.cpu()
m2 = self._m_1_3_3_1.cpu()
self._benchmark_tf_conv2d(m1, m2, 30000)
def benchmark_tf_conv2d_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m1 = self._m_8_28_28_3.gpu()
m2 = self._m_1_3_3_1.gpu()
self._benchmark_tf_conv2d(m1, m2, 30000)
def benchmark_tf_identity(self):
m = self._m_2
self._run(lambda: gen_array_ops.identity(m), 30000)
def benchmark_slowpath_tf_identity(self):
self._run(lambda: gen_array_ops.identity(1), 30000)
def benchmark_tfe_py_execute_identity(self):
m = self._m_2
ctx_handle = context.context()._handle
attrs = ("T", self._m_2.dtype.as_datatype_enum)
inputs = [m]
def f():
pywrap_tfe.TFE_Py_Execute(ctx_handle, None, "Identity", inputs, attrs, 1)
self._run(f, 30000)
def benchmark_tf_gradient_function_identity(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
self._run(
lambda: backprop.gradients_function(gen_array_ops.identity, [0])(m),
30000)
def benchmark_tf_gradient_forward_identity(self):
with backprop.GradientTape() as tape:
m = self._m_2
tape.watch(m)
self._run(lambda: gen_array_ops.identity(m), 30000)
def benchmark_tf_gradient_tape_push_pop(self):
def f():
with backprop.GradientTape():
pass
self._run(f, 30000)
def benchmark_tf_gradient_function_no_op(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
self._run(lambda: backprop.gradients_function(lambda x: x, [0])(m), 30000)
def _benchmark_np_matmul(self, m, transpose_b, num_iters):
a = m.cpu().numpy()
b = a.T if transpose_b else a
func = lambda: np.dot(a, b)
self._run(func, num_iters)
def _benchmark_tf_matmul(self, m, transpose_b, num_iters,
execution_mode=None):
func = lambda: math_ops.matmul(m, m, transpose_b=transpose_b)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters):
def func():
gen_math_ops.mat_mul(m, m, transpose_b=transpose_b)
self._run(func, num_iters)
def _benchmark_tfe_py_fastpath_execute_matmul(self, m, transpose_b,
num_iters):
def func():
c_tfe_py_fastpath_execute(m, m, transpose_b=transpose_b)
self._run(func, num_iters)
def _benchmark_tfe_py_execute_matmul(self, m, transpose_b, num_iters):
inputs = [m, m]
# pylint: disable=protected-access
ctx_handle = context.context()._handle
# pylint: enable=protected-access
device = context.context().device_name
attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
m.dtype.as_datatype_enum)
def func():
pywrap_tfe.TFE_Py_Execute(ctx_handle, device, "MatMul", inputs, attrs, 1)
self._run(func, num_iters)
def _benchmark_defun_matmul(self,
m,
transpose_b,
num_iters,
execution_mode=None):
f = function.defun(math_ops.matmul)
func = lambda: f(m, m, transpose_b=transpose_b)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_defun_matmul_with_signature(self,
m,
num_iters,
execution_mode=None):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([2, 2], dtypes.float32)])
def defun_matmul(m):
return math_ops.matmul(m, m)
func = lambda: defun_matmul(m)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_defun_matmul_relaxed_shape(self,
m,
num_iters,
execution_mode=None):
@def_function.function(experimental_relax_shapes=True)
def defun_matmul(m):
return math_ops.matmul(m, m)
m_3_by_3 = random_ops.random_uniform((3, 3))
defun_matmul(m_3_by_3)
func = lambda: defun_matmul(m)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_defun_args_matmul(self, m, num_iters, execution_mode=None):
@def_function.function
def defun_matmul(m):
return math_ops.matmul(m, m)
func = lambda: defun_matmul(m)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_nested_defun_matmul(self, m, transpose_b, num_iters):
inner = function.defun(math_ops.matmul)
@function.defun
def outer(a, b, c, transpose_b):
return math_ops.matmul(inner(a, b, transpose_b=transpose_b), c)
func = lambda: outer(m, m, m, transpose_b=transpose_b)
# Warmup before benchmark
for _ in range(1000):
func()
self._run(func, num_iters)
def _benchmark_defun_matmul_forward_backward(self,
m,
transpose_b,
num_iters,
execution_mode=None):
f = def_function.function(math_ops.matmul)
def func():
with backprop.GradientTape() as gt:
gt.watch(m)
y = f(m, m, transpose_b=transpose_b)
_ = gt.gradient(y, m)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_read_variable(self, m, num_iters):
self._run(m.value, num_iters)
def _benchmark_matmul_read_variable(self, m, num_iters):
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=num_iters)
def _benchmark_matmul_read_variable_with_tape(self, m, num_iters):
with backprop.GradientTape() as tape:
tape.watch(m)
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=num_iters)
def _benchmark_read_variable_with_tape(self, m, num_iters):
with backprop.GradientTape() as tape:
tape.watch(m)
self._run(m.value, num_iters)
# Benchmarks for A^2, A of dimension 2 by 2.
def benchmark_np_matmul_2_by_2(self):
self._benchmark_np_matmul(
self._m_2_by_2, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tf_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_fastpath_execute_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tfe_py_fastpath_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_execute_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_with_signature_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul_with_signature(
m, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_relaxed_shape_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul_relaxed_shape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_defun_args_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_args_matmul(m, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def _benchmark_matmul_forward_backward_2_by_2_CPU(self, run_eager=False):
def_function.run_functions_eagerly(run_eager)
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul_forward_backward(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def_function.run_functions_eagerly(False)
def _benchmark_matmul_forward_backward_2_by_2_CPU_async(
self, run_eager=False):
def_function.run_functions_eagerly(run_eager)
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul_forward_backward(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_defun_matmul_forward_backward_2_by_2_CPU(self):
self._benchmark_matmul_forward_backward_2_by_2_CPU(False)
def benchmark_defun_matmul_forward_backward_2_by_2_CPU_async(self):
self._benchmark_matmul_forward_backward_2_by_2_CPU_async(False)
def benchmark_defun_eager_matmul_forward_backward_2_by_2_CPU(self):
self._benchmark_matmul_forward_backward_2_by_2_CPU(True)
def benchmark_defun_eager_matmul_forward_backward_2_by_2_CPU_async(self):
self._benchmark_matmul_forward_backward_2_by_2_CPU_async(True)
def benchmark_tf_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tf_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_with_signature_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul_with_signature(
m, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_relaxed_shape_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul_relaxed_shape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_defun_args_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_args_matmul(m, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_nested_defun_matmul_2_by_2(self):
m = self._m_2_by_2.cpu()
self._benchmark_nested_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
# Benchmarks for AA.T, A of dimension 100 by 784.
def benchmark_np_matmul_100_by_784(self):
self._benchmark_np_matmul(
self._m_100_by_784,
transpose_b=True,
num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_CPU_async(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tf_matmul(
m,
transpose_b=True,
num_iters=self._num_iters_100_by_784,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_fastpath_execute_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tfe_py_fastpath_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_defun_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tf_matmul(
m,
transpose_b=True,
num_iters=self._num_iters_100_by_784,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_defun_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt(
"b/169371527: Support inserting transfer op in lowering.")
def benchmark_nested_defun_matmul_100_by_784_GPU(self):
m = self._m_100_by_784.gpu()
self._benchmark_nested_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def _benchmark_forwardprop_matmul_CPU(self, shape):
with ops.device(CPU):
m = random_ops.random_uniform(shape).cpu()
tangent = random_ops.random_uniform(shape).cpu()
def func():
with forwardprop.ForwardAccumulator(m, tangent) as acc:
result = math_ops.matmul(m, m, transpose_b=True)
return result, acc.jvp(result)
# Warmup before benchmark
for _ in range(100):
func()
self._run(func, 3000)
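  # Note (added comment): _benchmark_forwardprop_matmul_CPU above uses
  # ForwardAccumulator to compute the Jacobian-vector product of the matmul
  # along `tangent` during the forward pass, so it measures forward-mode
  # autodiff overhead rather than a tape-based backward pass.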
def _benchmark_forwardprop_in_defun_matmul_CPU(self, shape):
with ops.device(CPU):
@def_function.function
def compiled_function(x, tangent):
with forwardprop.ForwardAccumulator(x, tangent) as acc:
result = math_ops.matmul(x, x, transpose_b=True)
return result, acc.jvp(result)
m = random_ops.random_uniform(shape).cpu()
tangent = random_ops.random_uniform(shape).cpu()
func = lambda: compiled_function(m, tangent)
# Warmup before benchmark
for _ in range(100):
func()
self._run(func, 3000)
def _benchmark_forwardprop_in_defun_of_defun_matmul_CPU(self, shape):
with ops.device(CPU):
matmul = def_function.function(math_ops.matmul)
@def_function.function()
def compiled_function(x, tangent):
with forwardprop.ForwardAccumulator(x, tangent) as acc:
result = matmul(x, x, transpose_b=True)
return result, acc.jvp(result)
m = random_ops.random_uniform(shape).cpu()
tangent = random_ops.random_uniform(shape).cpu()
func = lambda: compiled_function(m, tangent)
# Warmup before benchmark
for _ in range(100):
func()
self._run(func, 3000)
def _benchmark_forwardprop_of_defun_matmul_CPU(self, shape):
with ops.device(CPU):
m = random_ops.random_uniform(shape).cpu()
tangent = random_ops.random_uniform(shape).cpu()
matmul = def_function.function(math_ops.matmul)
def func():
with forwardprop.ForwardAccumulator(m, tangent) as acc:
result = matmul(m, m, transpose_b=True)
return result, acc.jvp(result)
# Warmup before benchmark
for _ in range(100):
func()
self._run(func, 3000)
def benchmark_forwardprop_matmul_256_by_2096_CPU(self):
self._benchmark_forwardprop_matmul_CPU(shape=(256, 2096))
def benchmark_forwardprop_in_defun_matmul_256_by_2096_CPU(self):
self._benchmark_forwardprop_in_defun_matmul_CPU(shape=(256, 2096))
def benchmark_forwardprop_in_defun_of_defun_matmul_256_by_2096_CPU(self):
self._benchmark_forwardprop_in_defun_of_defun_matmul_CPU(shape=(256, 2096))
def benchmark_forwardprop_of_defun_matmul_256_by_2096_CPU(self):
self._benchmark_forwardprop_of_defun_matmul_CPU(shape=(256, 2096))
def benchmark_forwardprop_matmul_100_by_784_CPU(self):
self._benchmark_forwardprop_matmul_CPU(shape=(100, 784))
def benchmark_forwardprop_in_defun_matmul_100_by_784_CPU(self):
self._benchmark_forwardprop_in_defun_matmul_CPU(shape=(100, 784))
def benchmark_forwardprop_in_defun_of_defun_matmul_100_by_784_CPU(self):
self._benchmark_forwardprop_in_defun_of_defun_matmul_CPU(shape=(100, 784))
def benchmark_forwardprop_of_defun_matmul_100_by_784_CPU(self):
self._benchmark_forwardprop_of_defun_matmul_CPU(shape=(100, 784))
def _benchmark_tf_reduce_logsumexp(self,
device=CPU,
execution_mode=None,
defunc=False,
xla_compile=False):
with context.device(device):
x = constant_op.constant([[1, 0.], [0., 0.]])
if defunc:
reduce_func = def_function.function(
math_ops.reduce_logsumexp, experimental_compile=xla_compile)
func = lambda: reduce_func(x)
else:
func = lambda: math_ops.reduce_logsumexp(x)
self._run(func, 3000, execution_mode=execution_mode)
def benchmark_tf_reduce_logsumexp_CPU(self):
self._benchmark_tf_reduce_logsumexp()
def benchmark_tf_reduce_logsumexp_CPU_async(self):
self._benchmark_tf_reduce_logsumexp(execution_mode=context.ASYNC)
def benchmark_tf_reduce_logsumexp_GPU(self):
self._benchmark_tf_reduce_logsumexp(device=GPU)
def benchmark_tf_reduce_logsumexp_GPU_async(self):
self._benchmark_tf_reduce_logsumexp(device=GPU,
execution_mode=context.ASYNC)
@test_util.disable_tfrt(
"b/169371527: Support inserting transfer op in lowering.")
def benchmark_tf_reduce_logsumexp_CPU_defunc(self):
self._benchmark_tf_reduce_logsumexp(defunc=True)
@test_util.disable_tfrt(
"b/169371527: Support inserting transfer op in lowering.")
def benchmark_tf_reduce_logsumexp_CPU_async_defun(self):
self._benchmark_tf_reduce_logsumexp(
execution_mode=context.ASYNC, defunc=True)
def benchmark_tf_reduce_logsumexp_GPU_defun(self):
self._benchmark_tf_reduce_logsumexp(device=GPU, defunc=True)
def benchmark_tf_reduce_logsumexp_GPU_async_defun(self):
self._benchmark_tf_reduce_logsumexp(
device=GPU, execution_mode=context.ASYNC, defunc=True)
def benchmark_tf_reduce_logsumexp_GPU_defun_compile(self):
self._benchmark_tf_reduce_logsumexp(
device=GPU, defunc=True, xla_compile=True)
def benchmark_tf_reduce_logsumexp_GPU_async_defun_compile(self):
self._benchmark_tf_reduce_logsumexp(
device=GPU, execution_mode=context.ASYNC, defunc=True, xla_compile=True)
def _benchmark_tf_tensordot(self, device=CPU, execution_mode=None):
with context.device(device):
a = array_ops.ones((2, 2))
b = array_ops.ones((2, 2))
func = lambda: math_ops.tensordot(a, b, [[1], [0]])
self._run(func, 30000, execution_mode=execution_mode)
def benchmark_tf_tensordot_CPU(self):
self._benchmark_tf_tensordot()
def benchmark_tf_tensordot_CPU_async(self):
self._benchmark_tf_tensordot(execution_mode=context.ASYNC)
def benchmark_tf_tensordot_GPU(self):
self._benchmark_tf_tensordot(device=GPU)
def benchmark_tf_tensordot_GPU_async(self):
self._benchmark_tf_tensordot(device=GPU, execution_mode=context.ASYNC)
def _benchmark_tf_zeros(self, shape, dtype, device=CPU):
with context.device(device):
func = lambda: array_ops.zeros(shape, dtype)
self._run(func, 3000)
def benchmark_tf_zeros_2_by_2_float32_CPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.float32)
def benchmark_tf_zeros_2_by_2_bool_CPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.bool)
def benchmark_tf_zeros_2_by_2_string_CPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.string)
def benchmark_tf_zeros_2_by_2_float32_GPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.float32, device=GPU)
def benchmark_tf_zeros_2_by_2_bool_GPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.bool, device=GPU)
def benchmark_tf_zeros_30_by_30_float32_CPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.float32)
def benchmark_tf_zeros_30_by_30_bool_CPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.bool)
def benchmark_tf_zeros_30_by_30_string_CPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.string)
def benchmark_tf_zeros_30_by_30_float32_GPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.float32, device=GPU)
def benchmark_tf_zeros_30_by_30_bool_GPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.bool, device=GPU)
def benchmark_tf_zeros_100_by_100_float32_CPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.float32)
def benchmark_tf_zeros_100_by_100_bool_CPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.bool)
def benchmark_tf_zeros_100_by_100_string_CPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.string)
def benchmark_tf_zeros_100_by_100_float32_GPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.float32, device=GPU)
def benchmark_tf_zeros_100_by_100_bool_GPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.bool, device=GPU)
def _benchmark_tf_zeros_like(self, m, device=CPU):
with context.device(device):
func = lambda: array_ops.zeros_like(m)
self._run(func, 3000)
def benchmark_tf_zeros_like_CPU(self):
self._benchmark_tf_zeros_like(self._m_2_by_2)
def benchmark_tf_zeros_like_GPU(self):
self._benchmark_tf_zeros_like(self._m_2_by_2, device=GPU)
def benchmark_tf_zeros_like_variable_CPU(self):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_tf_zeros_like(m)
def benchmark_tf_zeros_like_variable_GPU(self):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_tf_zeros_like(m, device=GPU)
def _benchmark_tf_random_uniform_2_by_2(self,
shape=(2, 2),
dtype=dtypes.int32,
device=CPU):
with context.device(device):
def func():
return random_ops.random_uniform(shape, maxval=3, dtype=dtype)
self._run(func, num_iters=self._num_iters_2_by_2)
def benchmark_tf_random_uniform_2_by_2_integer_CPU(self):
self._benchmark_tf_random_uniform_2_by_2()
def benchmark_tf_random_uniform_2_by_2_integer_GPU(self):
self._benchmark_tf_random_uniform_2_by_2(device=GPU)
def benchmark_tf_random_uniform_2_by_2_float_CPU(self):
self._benchmark_tf_random_uniform_2_by_2(dtype=dtypes.float32)
def benchmark_tf_random_uniform_2_by_2_float_GPU(self):
self._benchmark_tf_random_uniform_2_by_2(
dtype=dtypes.float32, device=GPU)
def benchmark_tf_random_uniform_2_by_2_default_setting_CPU(self):
with context.device(CPU):
func = lambda: random_ops.random_uniform((2, 2))
self._run(func, num_iters=self._num_iters_2_by_2)
def benchmark_tf_random_uniform_2_by_2_default_setting_GPU(self):
with context.device(GPU):
func = lambda: random_ops.random_uniform((2, 2))
self._run(func, num_iters=self._num_iters_2_by_2)
def _benchmark_tf_dropout_2_by_2(self,
is_rate_tensor=True,
noise_shape=None,
device=CPU):
if is_rate_tensor:
rate = constant_op.constant(0.5, dtype=dtypes.float32)
else:
rate = 0.5
with context.device(device):
def func():
return nn_ops.dropout(
self._m_2_by_2, rate=rate, noise_shape=noise_shape)
self._run(func, num_iters=self._num_iters_2_by_2)
def benchmark_tf_dropout_scalar_rate_2_by_2_CPU(self):
self._benchmark_tf_dropout_2_by_2(is_rate_tensor=False)
def benchmark_tf_dropout_scalar_rate_2_by_2_GPU(self):
self._benchmark_tf_dropout_2_by_2(is_rate_tensor=False, device=GPU)
def benchmark_tf_dropout_2_by_2_CPU(self):
self._benchmark_tf_dropout_2_by_2()
def benchmark_tf_dropout_2_by_2_GPU(self):
self._benchmark_tf_dropout_2_by_2(device=GPU)
def _benchmark_transpose(self,
m,
num_iters,
perm=None,
conjugate=False,
execution_mode=None):
func = lambda: array_ops.transpose(m, perm, conjugate)
self._run(func, num_iters, execution_mode=execution_mode)
def benchmark_tf_transpose_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_transpose(m, num_iters=self._num_iters_2_by_2)
def benchmark_tf_transpose_2_by_2_GPU(self):
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_transpose(m, num_iters=self._num_iters_2_by_2)
def benchmark_tf_transpose_variable_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_transpose(m, num_iters=self._num_iters_2_by_2)
def benchmark_tf_transpose_variable_2_by_2_GPU(self):
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_transpose(m, num_iters=self._num_iters_2_by_2)
def benchmark_defun_without_signature(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(func)
t = constant_op.constant(0.0)
cache_computation = lambda: defined(t, t, t, t, t, t, t, t)
self._run(cache_computation, 30000)
def benchmark_defun_without_signature_and_with_kwargs(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(func)
t = constant_op.constant(0.0)
def cache_computation():
return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
self._run(cache_computation, 30000)
def benchmark_defun_with_signature(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(
func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
t = constant_op.constant(0.0)
signature_computation = lambda: defined(t, t, t, t, t, t, t, t)
self._run(signature_computation, 30000)
def benchmark_defun_with_signature_and_kwargs(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(
func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
t = constant_op.constant(0.0)
def signature_computation():
return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
self._run(signature_computation, 30000)
def benchmark_matmul_read_variable_op_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_matmul_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_matmul_read_variable_op_with_tape_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_matmul_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_with_tape_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_with_tape_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmarkScan(self):
elems = math_ops.range(1600)
def scan():
return functional_ops.scan(
lambda a, x: a + x, elems, parallel_iterations=1)
self._run(scan, 100)
@test_util.disable_tfrt("tf.While not supported RTFB tensor. b/169374895")
def benchmarkScanDefun(self):
elems = math_ops.range(1600)
@function.defun
def scan():
return functional_ops.scan(
lambda a, x: a + x, elems, parallel_iterations=1)
self._run(scan, 100)
def benchmark_fastpath_conversion_type_inference(self):
c = constant_op.constant(1., dtype=dtypes.float32)
def fn():
return gen_math_ops.add(c, 1)
self._run(fn, 10000)
def benchmark_convert_tensor(self):
value = ops.convert_to_tensor(42)
def fn():
return ops.convert_to_tensor(value)
self._run(fn, 10000)
def _benchmark_convert_constant(self, value, cached):
global GLOBAL_TEST_VALUE
GLOBAL_TEST_VALUE = value
def cached_func():
ops.convert_to_tensor(value)
def uncached_func():
global GLOBAL_TEST_VALUE
GLOBAL_TEST_VALUE += 1
ops.convert_to_tensor(GLOBAL_TEST_VALUE)
func = cached_func if cached else uncached_func
self._run(func, 10000)
def benchmark_convert_python_int(self):
self._benchmark_convert_constant(42, cached=True)
def benchmark_convert_python_int_uncached(self):
self._benchmark_convert_constant(42, cached=False)
def benchmark_convert_python_float(self):
self._benchmark_convert_constant(42.0, cached=True)
def benchmark_convert_python_float_uncached(self):
self._benchmark_convert_constant(42.0, cached=False)
def benchmark_convert_numpy_int(self):
self._benchmark_convert_constant(np.array(42), cached=True)
def benchmark_convert_numpy_int_uncached(self):
self._benchmark_convert_constant(np.array(42), cached=False)
def benchmark_convert_numpy_float(self):
self._benchmark_convert_constant(np.array(42.0), cached=True)
def benchmark_convert_numpy_float_uncached(self):
self._benchmark_convert_constant(np.array(42.0), cached=False)
def benchmark_convert_3x_list_to_tensor(self):
xs = [1, 2, 3]
self._run(lambda: ops.convert_to_tensor(xs), 1000)
def benchmark_convert_3x_array_to_tensor(self):
xs = np.array([1, 2, 3], dtype=np.int32)
self._run(lambda: ops.convert_to_tensor(xs), 1000)
def benchmark_constant_40x2_list_to_tensor(self):
xs = [[0] * 2] * 40
self._run(lambda: constant_op.constant(xs), 1000)
def benchmark_constant_40x2_array_to_tensor(self):
xs = np.array([[0] * 2] * 40, dtype=np.int32)
self._run(lambda: constant_op.constant(xs), 1000)
def benchmark_constant_40x_list_of_2x_arrays_to_tensor(self):
xs = [np.array([0] * 2, dtype=np.int32)] * 40
self._run(lambda: constant_op.constant(xs), 1000)
def benchmark_constant_20x20x20_double_list_to_float32_tensor(self):
xs = [[[np.linspace(0, 1, 21).tolist()] * 20] * 20]
self._run(lambda: constant_op.constant(xs, dtype=dtypes.float32), 10000)
def benchmark_constant_20x20x20_double_list_to_float64_tensor(self):
xs = [[[np.linspace(0, 1, 21).tolist()] * 20] * 20]
self._run(lambda: constant_op.constant(xs, dtype=dtypes.float64), 10000)
def benchmark_list_of_zeros_to_np_array(self):
values = []
for _ in range(1000):
values.append(array_ops.zeros(shape=(1000,)))
self._run(lambda: np.array([x.numpy() for x in values]), 1000)
def benchmark_function_trace(self):
def func(x):
return x
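    # Note (added comment): the lambda below builds a generator expression
    # without iterating it, so each benchmark iteration only pays for creating
    # the generator object; def_function.function(func) is never actually
    # traced or called.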
self._run(lambda: (def_function.function(func)(x) for x in range(1000)),
30000)
def _benchmarkFunctionWithResourceInputs(self, num_resources, num_iters):
@def_function.function
def add_all(*args):
return math_ops.add_n(*args)
with context.device(CPU):
resources = []
for _ in range(num_resources):
resources.append(resource_variable_ops.ResourceVariable(self._m_2))
self._run(lambda: add_all(resources), num_iters)
def benchmarkFunctionWithFiveResourceInputs(self):
self._benchmarkFunctionWithResourceInputs(5, 1000)
def benchmarkFunctionWithFiveHundredResourceInputs(self):
self._benchmarkFunctionWithResourceInputs(500, 100)
def _benchmarkResourceReadsInCondInInnerFunc(self, var_count):
rvars = []
for _ in range(var_count):
rvars.append(resource_variable_ops.ResourceVariable(1.0))
# Note: We want to benchmark the graph building time so we intentionally
# add this outer function so that the tf.function gets retraced every time.
def benchmark_fn():
@def_function.function
def fn_with_many_reads():
@def_function.function
def fn_with_many_reads_inner():
def then_branch():
return math_ops.add_n(rvars)
def else_branch():
return 0.
return control_flow_ops.cond(
constant_op.constant(True), then_branch, else_branch)
return fn_with_many_reads_inner()
return fn_with_many_reads()
with context.device(CPU):
self._run(benchmark_fn, 10)
def benchmarkTenThousandResourceReadsInCondInInnerFunc(self):
self._benchmarkResourceReadsInCondInInnerFunc(10000)
def benchmarkHundredResourceReadsInCondInInnerFunc(self):
self._benchmarkResourceReadsInCondInInnerFunc(100)
def benchmarkTenResourceReadsInCondInInnerFunc(self):
self._benchmarkResourceReadsInCondInInnerFunc(10)
def benchmark_tf_name_scope(self):
def fn():
with ops.name_scope_v2("name"):
pass
self._run(fn, 10000)
def benchmark_tf_nest_map_structure(self):
nested = {"a": [1, 2, 3], "b": (4, 5, 6)}
def fn():
nest.map_structure(lambda x: x, nested)
self._run(fn, 10000)
def benchmark_tf_nest_pack_sequence_as(self):
nested = {"a": [1, 2, 3], "b": (4, 5, 6)}
flat = nest.flatten(nested)
def fn():
nest.pack_sequence_as(nested, flat)
self._run(fn, 10000)
def benchmark_tf_nest_flatten_none(self):
def fn():
nest.flatten(None)
self._run(fn, 100000)
def benchmark_tf_nest_flatten(self):
nested = {"a": [1, 2, 3], "b": (4, 5, 6)}
def fn():
nest.flatten(nested)
self._run(fn, 100000)
def benchmark_tf_nn_convolution_overhead(self):
inputs = array_ops.ones((1, 1, 1, 1))
filters = array_ops.ones((1, 1, 1, 1))
def fn():
nn_ops.convolution_v2(inputs, filters)
self._run(fn, 10000)
def benchmark_tf_tensor_shape_creation_overhead(self):
# A `TensorShape` is created the first time `EagerTensor.shape` is
# called, which puts `TensorShape.__init__` on the hotpath. The
# `TensorShape` is created from `EagerTensor._shape_tuple`.
x = array_ops.ones((1, 1))
shape_tuple = x._shape_tuple()
def fn():
tensor_shape.TensorShape(shape_tuple)
self._run(fn, 100000)
if __name__ == "__main__":
test.main()
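# Running these benchmarks (assumed invocation, following the usual
# tf.test.Benchmark convention): pass a regex to the --benchmarks flag, e.g.
#   python benchmarks_test.py --benchmarks=benchmark_convert_tensor
# Benchmark methods whose names match the regex are executed and reported.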
|
|
#!/usr/bin/env python
from sys import exit
from os import environ
environ['KERAS_BACKEND'] = 'theano'
import numpy as np
from functools import partial
from tqdm import tqdm
from utils import *
from keras.layers import Input, Dense, Dropout, Activation, concatenate
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.losses import categorical_crossentropy
### HELPERS ###
def reform(arr, train_frac, val_frac, fields, weight, label, extra_fields=[]):
n = arr.shape[0]
ns = {}
ns['train'] = (0,int(train_frac*n))
ns['val'] = (ns['train'][1],ns['train'][1]+int(val_frac*n))
ns['test'] = (ns['val'][1],n)
print 'label=%i, n_train=%i, n_val=%i, n_test=%i'%(label,ns['train'][1],ns['val'][1]-ns['val'][0],ns['test'][1]-ns['test'][0])
weight_norm = 100. / np.sum(arr[weight])
x = {}; y = {}; w = {}; extras = {}
for subset in ['train','val','test']:
n_ = ns[subset]
x[subset] = arr[fields].view(np.float64).reshape(arr[fields].shape+(-1,))[n_[0]:n_[1]]
w[subset] = arr[weight][n_[0]:n_[1]] * weight_norm
y[subset] = (label * np.ones(n_[1]-n_[0])).astype(int)
for e in extra_fields:
extras[subset+'_'+e] = arr[e][n_[0]:n_[1]]
return {'x':x,'y':y,'w':w,'extras':extras}
def load_data(train_frac,val_frac,fields):
# arr_bkg = np.load('../data/Background_selected.npy')
# arr_sig = np.load('../data/Top_selected.npy')
arr_bkg = np.load('../data/QCD_goodjets.npy')
arr_sig = np.load('../data/ZpTT_goodjets.npy')
np.random.shuffle(arr_bkg)
np.random.shuffle(arr_sig)
bkg = reform(arr_bkg,train_frac,val_frac,fields,'weight',0,['top_ecf_bdt','msd'])
sig = reform(arr_sig,train_frac,val_frac,fields,'weight',1,['top_ecf_bdt','msd'])
x = {}; y = {}; w = {}; bdt = {}; mass = {}
for subset in ['train','val','test']:
x[subset] = np.concatenate((bkg['x'][subset],sig['x'][subset]), axis=0)
w[subset] = np.concatenate((bkg['w'][subset],sig['w'][subset]), axis=0)
bdt[subset] = np.concatenate((bkg['extras'][subset+'_top_ecf_bdt'],
sig['extras'][subset+'_top_ecf_bdt']), axis=0)
mass[subset] = np.concatenate((bkg['extras'][subset+'_msd'],
sig['extras'][subset+'_msd']), axis=0)
y_vec = np.concatenate((bkg['y'][subset],sig['y'][subset]), axis=0)
y[subset] = np_utils.to_categorical(y_vec, 2)
mass[subset] = mass[subset].reshape((mass[subset].shape[0],1))
bdt[subset] = bdt[subset].reshape((bdt[subset].shape[0],1))
return x,y,w,bdt,mass
### ACQUIRE DATA ###
fields = ['tau32sd','frec'] + ['ecf%i'%i for i in xrange(11)]
x,y,w,bdt,mass = load_data(0.5,0.25,fields)
for subset in bdt:
bdt[subset] = bdt[subset].reshape((bdt[subset].shape[0],))
dim = x['train'].shape[1]
y_gan = {subset:np.concatenate([y[subset],mass[subset]], axis=1)
for subset in y}
### BUILD THE MODELS ###
# Discrimination model
d_input = Input(shape=(dim,), name='hlf')
l = Dense(64, activation='relu')(d_input)
l = Dense(64, activation='relu')(l)
l = Dense(32, activation='relu')(l)
d_output = Dense(2, activation='softmax', name='hlf_disc')(l)
d_model = Model(inputs=d_input, outputs=d_output)
d_model.compile(optimizer=Adam(),
loss='categorical_crossentropy')
d_model.summary()
# Generation model
g_input = Input(shape=(2,),name='disc')
# l = GradientReversalLayer(hp_lambda=100, name='reversal')(g_input)
l = Dense(32, activation='relu')(g_input)
l = Dense(32, activation='sigmoid')(l)
g_output = Dense(1, activation='linear', name='hlf_gen')(l)
g_model = Model(inputs=g_input, outputs=g_output)
g_model.compile(optimizer=Adam(lr=1.),
loss='mse')
g_model.summary()
# Add the models
gan_input = Input(shape=(dim,), name='hlf_gan')
gan_d = d_model(gan_input)
gan_reverse_1 = GradientReversalLayer(hp_lambda=1, name='reversal_1')(gan_d)
gan_g = g_model(gan_reverse_1)
gan_reverse_2 = GradientReversalLayer(hp_lambda=1, name='reversal_2')(gan_g)
gan_output = concatenate([gan_d,gan_reverse_2],axis=1)
my_adversarial_loss = partial(adversarial_loss, g_weight=100.)
my_adversarial_loss.__name__ = "my_adversarial_loss" # functools.partial objects have no __name__, but Keras needs one to register the loss
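# For orientation, a minimal sketch of what `adversarial_loss` (imported via
# `from utils import *`) is assumed to compute: the first two columns of
# y_true/y_pred carry the class labels / discriminator output and the third
# carries the mass target / generator output, matching the y_gan and
# gan_output concatenations in this script. The real implementation lives in
# utils.py; this commented version is only an illustration and is not executed.
# def adversarial_loss(y_true, y_pred, g_weight=1.):
#     from keras import backend as K
#     disc_loss = categorical_crossentropy(y_true[:, :2], y_pred[:, :2])
#     gen_loss = K.mean(K.square(y_true[:, 2:] - y_pred[:, 2:]), axis=-1)
#     return disc_loss + g_weight * gen_loss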
gan_model = Model(inputs=gan_input, outputs=gan_output)
gan_model.compile(optimizer=Adam(lr=0.001),
loss=my_adversarial_loss)
### PRE-TRAIN DISCRIMINATOR ###
d_model.fit(x['train'], y['train'], sample_weight=w['train'],
batch_size=500, epochs=1, verbose=1,
shuffle=True)
y_pred_v0 = d_model.predict(x['test'])
### PRE-TRAIN GENERATOR ###
y_pred = d_model.predict(x['train'])
bkg_mask = y['train'][:,0]==1
g_model.fit(y_pred[bkg_mask], mass['train'][bkg_mask],
sample_weight=w['train'][bkg_mask],
batch_size=32, epochs=1, verbose=1,
shuffle=True)
### TRAIN THE ADVERSARIAL STACK ###
n_test_fast = 20
test_idx = np.random.randint(low=0, high=x['test'].shape[0], size=n_test_fast)  # exclusive upper bound avoids an out-of-range index
# y_pred = gan_model.predict(x['test'][test_idx])
# for i in range(n_test_fast):
# print 'tag: %i -> %4.3f, mass: %6.3f -> %6.3f'%(y_gan['test'][test_idx[i]][1],
# y_pred[i][1],
# y_gan['test'][test_idx[i]][2],
# y_pred[i][2],)
# checkpoint = ModelCheckpoint(filepath='simple_disc.h5', save_best_only=True)
for big_epoch in range(1):
batch_size = 500
n_train = x['train'].shape[0]
n_batch = n_train / batch_size
order = range(n_train)
np.random.shuffle(order)
for batch in tqdm(range(n_batch)):
idxs = order[batch*batch_size : (batch+1)*batch_size]
w_ = w['train'][idxs]
x_ = x['train'][idxs]
y_ = y['train'][idxs]
y_gan_ = y_gan['train'][idxs]
mass_ = mass['train'][idxs]
bkg_mask = y_[:,0]==1
# # now train the stack
# make_trainable(g_model,False)
gan_loss = gan_model.train_on_batch(x_, y_gan_, sample_weight=w_)
# make_trainable(g_model,True)
# run the discriminator
y_pred = d_model.predict(x_)
d_loss = d_model.evaluate(x_, y_,
verbose=0, sample_weight=w_)
# train the generator
g_loss = g_model.train_on_batch(y_pred[bkg_mask], mass_[bkg_mask],
sample_weight=w_[bkg_mask])
# if batch%1000==0:
# print d_loss, g_loss, gan_loss
# y_pred = d_model.predict(x['val'])
# print d_model.evaluate(x['val'],y['val'],
# verbose=1, sample_weight=w['val'])
# print g_model.evaluate(y_pred,mass['val'],
# verbose=1, sample_weight=w['val'])
# print gan_model.evaluate(x['val'],y_gan['val'],
# verbose=1, sample_weight=w['val'])
y_pred_v1 = gan_model.predict(x['test'])
dnn_v0_t = Tagger(y_pred_v0[:,1], 'DNN v0', 0, 1, False)
dnn_v1_t = Tagger(y_pred_v1[:,1], 'DNN v1', 0, 1, False)
bdt_t = Tagger(bdt['test'], 'BDT', -1, 1, False)
create_roc([dnn_v0_t,dnn_v1_t,bdt_t],
np.argmax(y['test'],axis=1),
w['test'],'gan')
mask = np.logical_and(110<mass['test'], mass['test']<210).reshape((y['test'].shape[0],))
dnn_v0_t_mass = Tagger(y_pred_v0[:,1][mask], 'DNN v0', 0, 1, False)
dnn_v1_t_mass = Tagger(y_pred_v1[:,1][mask], 'DNN v1', 0, 1, False)
bdt_t_mass = Tagger(bdt['test'][mask], 'BDT', -1, 1, False)
wps = create_roc([dnn_v0_t_mass, dnn_v1_t_mass, bdt_t_mass],
np.argmax(y['test'][mask],axis=1),
w['test'][mask],'gan_mass')
print wps
mask_v0 = np.logical_and(y_pred_v0[:,1]>wps[0], y['test'][:,0]==1)
mask_v1 = np.logical_and(y_pred_v1[:,1]>wps[1], y['test'][:,0]==1)
mask_bdt = np.logical_and(bdt['test']>wps[2], y['test'][:,0]==1)
mask_bkg = y['test'][:,0]==1
mass_test = mass['test'].reshape((mass['test'].shape[0],))
props = {'xlabel' : '$m_{SD}$ [GeV]',
'bins' : np.arange(0,500,20),
'output' : 'sculpt'}
h_inc = {'vals':mass_test[mask_bkg],
'weights':w['test'][mask_bkg],
'color':'b', 'label':'Inclusive'}
h_v0 = {'vals':mass_test[mask_v0],
'weights':w['test'][mask_v0],
'color':'k', 'label':'DNN v0'}
h_v1 = {'vals':mass_test[mask_v1],
'weights':w['test'][mask_v1],
'color':'r', 'label':'DNN v1'}
h_bdt = {'vals':mass_test[mask_bdt],
'weights':w['test'][mask_bdt],
'color':'g', 'label':'BDT'}
plot_hists(props, [h_inc, h_v0, h_v1])
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/snapshots')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_tags_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class SnapshotsOperations(object):
"""SnapshotsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2021_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.SnapshotListResult"]:
"""Gets a list of snapshots in the specified subscription.
Gets a list of snapshots in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SnapshotListResult or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_11_01_preview.models.SnapshotListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SnapshotListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SnapshotListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/snapshots'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.SnapshotListResult"]:
"""Lists snapshots in the specified subscription and resource group.
Lists snapshots in the specified subscription and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SnapshotListResult or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_11_01_preview.models.SnapshotListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SnapshotListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SnapshotListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.Snapshot":
"""Gets a snapshot.
Gets a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_11_01_preview.models.Snapshot
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Snapshot"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Snapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}'} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.Snapshot",
**kwargs: Any
) -> "_models.Snapshot":
"""Creates or updates a snapshot.
Creates or updates a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: The snapshot to create or update.
:type parameters: ~azure.mgmt.containerservice.v2021_11_01_preview.models.Snapshot
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_11_01_preview.models.Snapshot
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Snapshot"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Snapshot')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Snapshot', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Snapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}'} # type: ignore
@distributed_trace
def update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.Snapshot":
"""Updates tags on a snapshot.
Updates tags on a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Update snapshot Tags operation.
:type parameters: ~azure.mgmt.containerservice.v2021_11_01_preview.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_11_01_preview.models.Snapshot
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Snapshot"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'TagsObject')
request = build_update_tags_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self.update_tags.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Snapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
"""Deletes a snapshot.
Deletes a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}'} # type: ignore
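# Usage sketch (assumed client wiring, not part of the generated module): this
# operations group is reached through the generated management client rather
# than instantiated directly, e.g.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.containerservice import ContainerServiceClient
#
#   client = ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>")
#   for snapshot in client.snapshots.list():
#       print(snapshot.name)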
|
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import six
import struct
import unittest
from nose.tools import eq_
from ryu.lib.packet import icmp
from ryu.lib.packet import packet_utils
LOG = logging.getLogger(__name__)
class Test_icmp(unittest.TestCase):
echo_id = None
echo_seq = None
echo_data = None
unreach_mtu = None
unreach_data = None
unreach_data_len = None
te_data = None
te_data_len = None
def setUp(self):
self.type_ = icmp.ICMP_ECHO_REQUEST
self.code = 0
self.csum = 0
self.data = b''
self.ic = icmp.icmp(self.type_, self.code, self.csum, self.data)
self.buf = bytearray(struct.pack(
icmp.icmp._PACK_STR, self.type_, self.code, self.csum))
self.csum_calc = packet_utils.checksum(self.buf)
struct.pack_into('!H', self.buf, 2, self.csum_calc)
def setUp_with_echo(self):
self.echo_id = 13379
self.echo_seq = 1
self.echo_data = b'\x30\x0e\x09\x00\x00\x00\x00\x00' \
+ b'\x10\x11\x12\x13\x14\x15\x16\x17' \
+ b'\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f' \
+ b'\x20\x21\x22\x23\x24\x25\x26\x27' \
+ b'\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f' \
+ b'\x30\x31\x32\x33\x34\x35\x36\x37'
self.data = icmp.echo(
id_=self.echo_id, seq=self.echo_seq, data=self.echo_data)
self.type_ = icmp.ICMP_ECHO_REQUEST
self.code = 0
self.ic = icmp.icmp(self.type_, self.code, self.csum, self.data)
self.buf = bytearray(struct.pack(
icmp.icmp._PACK_STR, self.type_, self.code, self.csum))
self.buf += self.data.serialize()
self.csum_calc = packet_utils.checksum(self.buf)
struct.pack_into('!H', self.buf, 2, self.csum_calc)
def setUp_with_dest_unreach(self):
self.unreach_mtu = 10
self.unreach_data = b'abc'
self.unreach_data_len = len(self.unreach_data)
self.data = icmp.dest_unreach(
data_len=self.unreach_data_len, mtu=self.unreach_mtu,
data=self.unreach_data)
self.type_ = icmp.ICMP_DEST_UNREACH
self.code = icmp.ICMP_HOST_UNREACH_CODE
self.ic = icmp.icmp(self.type_, self.code, self.csum, self.data)
self.buf = bytearray(struct.pack(
icmp.icmp._PACK_STR, self.type_, self.code, self.csum))
self.buf += self.data.serialize()
self.csum_calc = packet_utils.checksum(self.buf)
struct.pack_into('!H', self.buf, 2, self.csum_calc)
def setUp_with_TimeExceeded(self):
self.te_data = b'abc'
self.te_data_len = len(self.te_data)
self.data = icmp.TimeExceeded(
data_len=self.te_data_len, data=self.te_data)
self.type_ = icmp.ICMP_TIME_EXCEEDED
self.code = 0
self.ic = icmp.icmp(self.type_, self.code, self.csum, self.data)
self.buf = bytearray(struct.pack(
icmp.icmp._PACK_STR, self.type_, self.code, self.csum))
self.buf += self.data.serialize()
self.csum_calc = packet_utils.checksum(self.buf)
struct.pack_into('!H', self.buf, 2, self.csum_calc)
def test_init(self):
eq_(self.type_, self.ic.type)
eq_(self.code, self.ic.code)
eq_(self.csum, self.ic.csum)
eq_(str(self.data), str(self.ic.data))
def test_init_with_echo(self):
self.setUp_with_echo()
self.test_init()
def test_init_with_dest_unreach(self):
self.setUp_with_dest_unreach()
self.test_init()
def test_init_with_TimeExceeded(self):
self.setUp_with_TimeExceeded()
self.test_init()
def test_parser(self):
_res = icmp.icmp.parser(six.binary_type(self.buf))
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.type_, res.type)
eq_(self.code, res.code)
eq_(self.csum_calc, res.csum)
eq_(str(self.data), str(res.data))
def test_parser_with_echo(self):
self.setUp_with_echo()
self.test_parser()
def test_parser_with_dest_unreach(self):
self.setUp_with_dest_unreach()
self.test_parser()
def test_parser_with_TimeExceeded(self):
self.setUp_with_TimeExceeded()
self.test_parser()
def test_serialize(self):
data = bytearray()
prev = None
buf = self.ic.serialize(data, prev)
res = struct.unpack_from(icmp.icmp._PACK_STR, six.binary_type(buf))
eq_(self.type_, res[0])
eq_(self.code, res[1])
eq_(self.csum_calc, res[2])
def test_serialize_with_echo(self):
self.setUp_with_echo()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ic.serialize(data, prev)
echo = icmp.echo.parser(six.binary_type(buf), icmp.icmp._MIN_LEN)
eq_(repr(self.data), repr(echo))
def test_serialize_with_dest_unreach(self):
self.setUp_with_dest_unreach()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ic.serialize(data, prev)
unreach = icmp.dest_unreach.parser(six.binary_type(buf), icmp.icmp._MIN_LEN)
eq_(repr(self.data), repr(unreach))
def test_serialize_with_TimeExceeded(self):
self.setUp_with_TimeExceeded()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ic.serialize(data, prev)
te = icmp.TimeExceeded.parser(six.binary_type(buf), icmp.icmp._MIN_LEN)
eq_(repr(self.data), repr(te))
def test_to_string(self):
icmp_values = {'type': repr(self.type_),
'code': repr(self.code),
'csum': repr(self.csum),
'data': repr(self.data)}
_ic_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, v in inspect.getmembers(self.ic)
if k in icmp_values])
ic_str = '%s(%s)' % (icmp.icmp.__name__, _ic_str)
eq_(str(self.ic), ic_str)
eq_(repr(self.ic), ic_str)
def test_to_string_with_echo(self):
self.setUp_with_echo()
self.test_to_string()
def test_to_string_with_dest_unreach(self):
self.setUp_with_dest_unreach()
self.test_to_string()
def test_to_string_with_TimeExceeded(self):
self.setUp_with_TimeExceeded()
self.test_to_string()
def test_default_args(self):
ic = icmp.icmp()
buf = ic.serialize(bytearray(), None)
res = struct.unpack(icmp.icmp._PACK_STR, six.binary_type(buf[:4]))
eq_(res[0], 8)
eq_(res[1], 0)
eq_(buf[4:], b'\x00\x00\x00\x00')
# with data
ic = icmp.icmp(type_=icmp.ICMP_DEST_UNREACH, data=icmp.dest_unreach())
buf = ic.serialize(bytearray(), None)
res = struct.unpack(icmp.icmp._PACK_STR, six.binary_type(buf[:4]))
eq_(res[0], 3)
eq_(res[1], 0)
eq_(buf[4:], b'\x00\x00\x00\x00')
def test_json(self):
jsondict = self.ic.to_jsondict()
ic = icmp.icmp.from_jsondict(jsondict['icmp'])
eq_(str(self.ic), str(ic))
def test_json_with_echo(self):
self.setUp_with_echo()
self.test_json()
def test_json_with_dest_unreach(self):
self.setUp_with_dest_unreach()
self.test_json()
def test_json_with_TimeExceeded(self):
self.setUp_with_TimeExceeded()
self.test_json()
class Test_echo(unittest.TestCase):
def setUp(self):
self.id_ = 13379
self.seq = 1
self.data = b'\x30\x0e\x09\x00\x00\x00\x00\x00' \
+ b'\x10\x11\x12\x13\x14\x15\x16\x17' \
+ b'\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f' \
+ b'\x20\x21\x22\x23\x24\x25\x26\x27' \
+ b'\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f' \
+ b'\x30\x31\x32\x33\x34\x35\x36\x37'
self.echo = icmp.echo(
self.id_, self.seq, self.data)
self.buf = struct.pack('!HH', self.id_, self.seq)
self.buf += self.data
def test_init(self):
eq_(self.id_, self.echo.id)
eq_(self.seq, self.echo.seq)
eq_(self.data, self.echo.data)
def test_parser(self):
_res = icmp.echo.parser(self.buf, 0)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.id_, res.id)
eq_(self.seq, res.seq)
eq_(self.data, res.data)
def test_serialize(self):
buf = self.echo.serialize()
res = struct.unpack_from('!HH', six.binary_type(buf))
eq_(self.id_, res[0])
eq_(self.seq, res[1])
eq_(self.data, buf[struct.calcsize('!HH'):])
def test_default_args(self):
ec = icmp.echo()
buf = ec.serialize()
res = struct.unpack(icmp.echo._PACK_STR, six.binary_type(buf))
eq_(res[0], 0)
eq_(res[1], 0)
class Test_dest_unreach(unittest.TestCase):
def setUp(self):
self.mtu = 10
self.data = b'abc'
self.data_len = len(self.data)
self.dest_unreach = icmp.dest_unreach(
data_len=self.data_len, mtu=self.mtu, data=self.data)
self.buf = struct.pack('!xBH', self.data_len, self.mtu)
self.buf += self.data
def test_init(self):
eq_(self.data_len, self.dest_unreach.data_len)
eq_(self.mtu, self.dest_unreach.mtu)
eq_(self.data, self.dest_unreach.data)
def test_parser(self):
_res = icmp.dest_unreach.parser(self.buf, 0)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.data_len, res.data_len)
eq_(self.mtu, res.mtu)
eq_(self.data, res.data)
def test_serialize(self):
buf = self.dest_unreach.serialize()
res = struct.unpack_from('!xBH', six.binary_type(buf))
eq_(self.data_len, res[0])
eq_(self.mtu, res[1])
eq_(self.data, buf[struct.calcsize('!xBH'):])
def test_default_args(self):
du = icmp.dest_unreach()
buf = du.serialize()
res = struct.unpack(icmp.dest_unreach._PACK_STR, six.binary_type(buf))
eq_(res[0], 0)
eq_(res[1], 0)
class Test_TimeExceeded(unittest.TestCase):
def setUp(self):
self.data = b'abc'
self.data_len = len(self.data)
self.te = icmp.TimeExceeded(
data_len=self.data_len, data=self.data)
self.buf = struct.pack('!xBxx', self.data_len)
self.buf += self.data
def test_init(self):
eq_(self.data_len, self.te.data_len)
eq_(self.data, self.te.data)
def test_parser(self):
_res = icmp.TimeExceeded.parser(self.buf, 0)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.data_len, res.data_len)
eq_(self.data, res.data)
def test_serialize(self):
buf = self.te.serialize()
res = struct.unpack_from('!xBxx', six.binary_type(buf))
eq_(self.data_len, res[0])
eq_(self.data, buf[struct.calcsize('!xBxx'):])
def test_default_args(self):
te = icmp.TimeExceeded()
buf = te.serialize()
res = struct.unpack(icmp.TimeExceeded._PACK_STR, six.binary_type(buf))
eq_(res[0], 0)
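# Usage sketch (assumed, mirroring the API exercised by the tests above):
# build an ICMP echo request, serialize it, and parse it back.
#
#   req = icmp.icmp(type_=icmp.ICMP_ECHO_REQUEST, code=0, csum=0,
#                   data=icmp.echo(id_=1, seq=1, data=b'ping'))
#   wire = req.serialize(bytearray(), None)   # checksum is filled in here
#   parsed = icmp.icmp.parser(six.binary_type(wire))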
|
|
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Docbuilder for O3D and o3djs."""
import os
import os.path
import sys
import imp
import types
import glob
import subprocess
import shutil
import re
_java_exe = ''
_output_dir = ''
_third_party_dir = ''
_o3d_third_party_dir = ''
_script_path = os.path.dirname(os.path.realpath(__file__))
_js_copyright = """
/*
* Copyright 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
"""
GlobalsDict = { }
def MakePath(*file_paths):
"""Makes a path absolute given a path relative to this script."""
return os.path.join(_script_path, *file_paths)
def MakeCommandName(name):
"""adds '.exe' if on Windows"""
if os.name == 'nt':
return name + '.exe'
return name
def Execute(args):
"""Executes an external program."""
# Uncomment the next line for debugging.
# print "Execute: ", ' '.join(args)
if subprocess.call(args) != 0:  # non-zero exit or death by signal means failure
raise RuntimeError('FAILED: ' + ' '.join(args))
def AppendBasePath(folder, filenames):
"""Appends a base path to a ist of files"""
return [os.path.join(folder, filename) for filename in filenames]
def RunNixysa(idl_files, generate, output_dir, nixysa_options):
"""Executes Nixysa."""
Execute([
sys.executable,
MakePath(_o3d_third_party_dir, 'nixysa', 'codegen.py'),
'--binding-module=o3d:%s' % MakePath('..', 'plugin', 'o3d_binding.py'),
'--generate=' + generate,
'--force',
'--output-dir=' + output_dir] +
nixysa_options +
idl_files)
def RunJSDocToolkit(js_files, ezt_output_dir, html_output_dir, prefix, mode,
baseURL, topURL, exports_file):
"""Executes the JSDocToolkit."""
list_filename = MakePath(_output_dir, 'doclist.conf')
f = open(list_filename, 'w')
f.write('{\nD:{\n')
f.write('prefix: "%s",\n' % prefix)
f.write('baseURL: "%s",\n' % baseURL)
f.write('topURL: "%s",\n' % topURL)
f.write('mode: "%s",\n' % mode)
f.write('htmlOutDir: "%s",\n' % html_output_dir.replace('\\', '/'))
f.write('exportsFile: "%s",\n' % exports_file.replace('\\', '/'))
f.write('endMarker: ""\n')
f.write('},\n')
f.write('_: [\n')
for filename in js_files:
f.write('"%s",\n' % filename.replace('\\', '/'))
f.write(']\n}\n')
f.close()
files_dir = MakePath(_third_party_dir, 'jsdoctoolkit', 'files')
Execute([
_java_exe,
'-Djsdoc.dir=%s' % files_dir,
'-jar',
MakePath(files_dir, 'jsrun.jar'),
MakePath(files_dir, 'app', 'run.js'),
'-v',
'-t=%s' % MakePath('jsdoc-toolkit-templates'),
'-d=' + ezt_output_dir,
'-c=' + list_filename])
def DeleteOldDocs(docs_js_outpath):
try:
shutil.rmtree(docs_js_outpath)
except OSError:
# The docs output directory may not exist yet; that is fine.
pass
def BuildJavaScriptForDocsFromIDLs(idl_files, output_dir):
RunNixysa(idl_files, 'jsheader', output_dir,
['--properties-equal-undefined', '--overloaded-function-docs'])
def BuildJavaScriptForExternsFromIDLs(idl_files, output_dir):
if (os.path.exists(output_dir)):
for filename in glob.glob(os.path.join(output_dir, '*.js')):
os.unlink(filename)
RunNixysa(idl_files, 'jsheader', output_dir, ['--no-return-docs'])
def BuildO3DDocsFromJavaScript(js_files, ezt_output_dir, html_output_dir):
RunJSDocToolkit(js_files, ezt_output_dir, html_output_dir,
'classo3d_1_1_', 'o3d', '', '', '')
def BuildO3DClassHierarchy(html_output_dir):
# TODO(gman): We need to make multiple graphs. One for Params, one for
# ParamMatrix4, one for RenderNode, one for everything else.
dot_path = MakePath(_third_party_dir, 'graphviz', 'files', 'bin',
MakeCommandName('dot'))
if os.path.exists(dot_path):
Execute([
dot_path,
'-Tcmapx', '-o' + MakePath(html_output_dir, 'class_hierarchy.map'),
'-Tpng', '-o' + MakePath(html_output_dir, 'class_hierarchy.png'),
MakePath(html_output_dir, 'class_hierarchy.dot')])
def BuildO3DJSDocs(js_files, ezt_output_dir, html_output_dir, exports_file):
# The trailing slashes on 'jsdocs/' and '../' must stay.
RunJSDocToolkit(js_files, ezt_output_dir, html_output_dir, 'js_1_0_', 'o3djs',
'jsdocs/', '../', exports_file)
def BuildO3DExternsFile(js_files_dir, extra_externs_file, externs_file):
outfile = open(externs_file, 'w')
filenames = (glob.glob(os.path.join(js_files_dir, '*.js')) +
[extra_externs_file])
for filename in filenames:
print "-----", filename
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
filtered = []
skipping = False
# strip out @o3dparameter stuff
for line in lines:
if skipping:
if line.startswith(' * @') or line.startswith(' */'):
skipping = False
if not skipping:
if line.startswith(' * @o3dparameter'):
skipping = True
if not skipping:
filtered.append(line)
outfile.write(''.join(filtered))
outfile.close()
def BuildCompiledO3DJS(o3djs_files,
externs_path,
o3d_externs_js_path,
compiled_o3djs_outpath):
Execute([
_java_exe,
'-jar',
MakePath('..', '..', 'o3d-internal', 'jscomp', 'JSCompiler_deploy.jar'),
'--property_renaming', 'OFF',
'--variable_renaming', 'LOCAL',
'--jscomp_error=visibility',
'--jscomp_error=accessControls',
'--strict',
'--externs=%s' % externs_path,
('--externs=%s' % o3d_externs_js_path),
('--js_output_file=%s' % compiled_o3djs_outpath)] +
['-js=%s' % (x, ) for x in o3djs_files]);
# strip out goog.exportSymbol and move o3djs.require to end
file = open(compiled_o3djs_outpath, 'r')
contents = file.read()
file.close()
contents = re.sub(r'goog.exportSymbol\([^\)]*\);', '', contents)
requires = set(re.findall(r'o3djs.require\([^\)]*\);', contents))
contents = re.sub(r'o3djs.require\([^\)]*\);', '', contents)
file = open(compiled_o3djs_outpath, 'w')
file.write(_js_copyright)
file.write(contents)
file.write('\n')
file.write('\n'.join(requires))
file.close()
def CopyStaticFiles(o3d_docs_ezt_outpath, o3d_docs_html_outpath):
files = ['stylesheet.css',
'prettify.css',
'prettify.js',
'tabs.css',
'tab_l.gif',
'tab_r.gif',
'tab_b.gif']
for file in files:
shutil.copyfile(MakePath('jsdoc-toolkit-templates', 'static', file),
MakePath(os.path.join(o3d_docs_ezt_outpath, file)))
shutil.copyfile(MakePath('jsdoc-toolkit-templates', 'static', file),
MakePath(os.path.join(o3d_docs_html_outpath, file)))
def main(argv):
"""Builds the O3D API docs and externs and the o3djs docs."""
global _java_exe
_java_exe = argv[0]
global _third_party_dir
_third_party_dir = argv[1]
global _o3d_third_party_dir
_o3d_third_party_dir = os.path.normpath(
os.path.join(os.path.dirname(__file__), '..', 'third_party'))
# Fix up the python path of subprocesses by setting PYTHONPATH.
pythonpath = os.pathsep.join([MakePath(_o3d_third_party_dir, 'gflags', 'python'),
MakePath(_o3d_third_party_dir, 'ply')])
orig_pythonpath = os.environ.get('PYTHONPATH')
if orig_pythonpath:
pythonpath = os.pathsep.join([pythonpath, orig_pythonpath])
os.environ['PYTHONPATH'] = pythonpath
js_list_filename = MakePath('..', 'samples', 'o3djs', 'js_list.manifest')
idl_list_filename = MakePath('..', 'plugin', 'idl_list.manifest')
js_list_basepath = os.path.dirname(js_list_filename)
idl_list_basepath = os.path.dirname(idl_list_filename)
global _output_dir
_output_dir = argv[2]
docs_outpath = os.path.join(_output_dir, 'documentation')
docs_js_outpath = MakePath(docs_outpath, 'apijs')
externs_js_outpath = MakePath(_output_dir, 'externs')
o3d_docs_ezt_outpath = MakePath(docs_outpath, 'reference')
o3d_docs_html_outpath = MakePath(docs_outpath, 'local_html')
o3djs_docs_ezt_outpath = MakePath(docs_outpath, 'reference', 'jsdocs')
o3djs_docs_html_outpath = MakePath(docs_outpath, 'local_html', 'jsdocs')
o3d_externs_path = MakePath(_output_dir, 'o3d-externs.js')
o3djs_exports_path = MakePath(_output_dir, 'o3d-exports.js')
compiled_o3djs_outpath = MakePath(docs_outpath, 'base.js')
externs_path = MakePath('externs', 'externs.js')
o3d_extra_externs_path = MakePath('externs', 'o3d-extra-externs.js')
js_list = eval(open(js_list_filename, "r").read())
idl_list = eval(open(idl_list_filename, "r").read())
idl_files = AppendBasePath(idl_list_basepath, idl_list)
o3djs_files = AppendBasePath(js_list_basepath, js_list)
# base.js must come first so the o3djs namespace is defined before the modules that extend it.
o3djs_files = (
filter(lambda x: x.endswith('base.js'), o3djs_files) +
filter(lambda x: not x.endswith('base.js'), o3djs_files))
docs_js_files = [os.path.join(
docs_js_outpath,
os.path.splitext(os.path.basename(f))[0] + '.js')
for f in idl_list]
try:
DeleteOldDocs(MakePath(docs_outpath))
BuildJavaScriptForDocsFromIDLs(idl_files, docs_js_outpath)
BuildO3DDocsFromJavaScript([o3d_extra_externs_path] + docs_js_files,
o3d_docs_ezt_outpath, o3d_docs_html_outpath)
BuildO3DClassHierarchy(o3d_docs_html_outpath)
BuildJavaScriptForExternsFromIDLs(idl_files, externs_js_outpath)
BuildO3DExternsFile(externs_js_outpath,
o3d_extra_externs_path,
o3d_externs_path)
BuildO3DJSDocs(o3djs_files + [o3d_externs_path], o3djs_docs_ezt_outpath,
o3djs_docs_html_outpath, o3djs_exports_path)
CopyStaticFiles(o3d_docs_ezt_outpath, o3d_docs_html_outpath)
BuildCompiledO3DJS(o3djs_files + [o3djs_exports_path],
externs_path,
o3d_externs_path,
compiled_o3djs_outpath)
except Exception:
if os.path.exists(compiled_o3djs_outpath):
os.unlink(compiled_o3djs_outpath)
raise
if __name__ == '__main__':
main(sys.argv[1:])
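# Invocation sketch (argument order follows main() above; the script name is
# an assumption):
#   python build_docs.py /usr/bin/java ../../third_party /path/to/output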
|
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy.orm import exc
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import agent as ext_agent
from neutron import manager
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
LOG = logging.getLogger(__name__)
cfg.CONF.register_opt(
cfg.IntOpt('agent_down_time', default=75,
help=_("Seconds to regard the agent is down; should be at "
"least twice report_interval, to be sure the "
"agent is down for good.")))
class Agent(model_base.BASEV2, models_v2.HasId):
"""Represents agents running in neutron deployments."""
__table_args__ = (
sa.UniqueConstraint('agent_type', 'host',
name='uniq_agents0agent_type0host'),
)
# L3 agent, DHCP agent, OVS agent, LinuxBridge
agent_type = sa.Column(sa.String(255), nullable=False)
binary = sa.Column(sa.String(255), nullable=False)
# TOPIC is a fanout exchange topic
topic = sa.Column(sa.String(255), nullable=False)
# TOPIC.host is a target topic
host = sa.Column(sa.String(255), nullable=False)
admin_state_up = sa.Column(sa.Boolean, default=True,
nullable=False)
# the time when the first report came from the agent
created_at = sa.Column(sa.DateTime, nullable=False)
# the time when the first report came after the agent started
started_at = sa.Column(sa.DateTime, nullable=False)
# updated when agents report
heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
# description is note for admin user
description = sa.Column(sa.String(255))
# configurations: a JSON-encoded dict string; 4095 characters should be enough
configurations = sa.Column(sa.String(4095), nullable=False)
@property
def is_active(self):
return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp)
class AgentDbMixin(ext_agent.AgentPluginBase):
"""Mixin class to add agent extension to db_plugin_base_v2."""
def _get_agent(self, context, id):
try:
agent = self._get_by_id(context, Agent, id)
except exc.NoResultFound:
raise ext_agent.AgentNotFound(id=id)
return agent
@classmethod
def is_agent_down(cls, heart_beat_time):
return timeutils.is_older_than(heart_beat_time,
cfg.CONF.agent_down_time)
def get_configuration_dict(self, agent_db):
try:
conf = jsonutils.loads(agent_db.configurations)
except Exception:
msg = _('Configuration for agent %(agent_type)s on host %(host)s'
' is invalid.')
LOG.warn(msg, {'agent_type': agent_db.agent_type,
'host': agent_db.host})
conf = {}
return conf
def _make_agent_dict(self, agent, fields=None):
attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
ext_agent.RESOURCE_NAME + 's')
res = dict((k, agent[k]) for k in attr
if k not in ['alive', 'configurations'])
res['alive'] = not AgentDbMixin.is_agent_down(
res['heartbeat_timestamp'])
res['configurations'] = self.get_configuration_dict(agent)
return self._fields(res, fields)
def delete_agent(self, context, id):
with context.session.begin(subtransactions=True):
agent = self._get_agent(context, id)
context.session.delete(agent)
def update_agent(self, context, id, agent):
agent_data = agent['agent']
with context.session.begin(subtransactions=True):
agent = self._get_agent(context, id)
agent.update(agent_data)
return self._make_agent_dict(agent)
def get_agents_db(self, context, filters=None):
query = self._get_collection_query(context, Agent, filters=filters)
return query.all()
def get_agents(self, context, filters=None, fields=None):
return self._get_collection(context, Agent,
self._make_agent_dict,
filters=filters, fields=fields)
def _get_agent_by_type_and_host(self, context, agent_type, host):
query = self._model_query(context, Agent)
try:
agent_db = query.filter(Agent.agent_type == agent_type,
Agent.host == host).one()
return agent_db
except exc.NoResultFound:
raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type,
host=host)
except exc.MultipleResultsFound:
raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type,
host=host)
def get_agent(self, context, id, fields=None):
agent = self._get_agent(context, id)
return self._make_agent_dict(agent, fields)
def _create_or_update_agent(self, context, agent):
with context.session.begin(subtransactions=True):
res_keys = ['agent_type', 'binary', 'host', 'topic']
res = dict((k, agent[k]) for k in res_keys)
configurations_dict = agent.get('configurations', {})
res['configurations'] = jsonutils.dumps(configurations_dict)
current_time = timeutils.utcnow()
try:
agent_db = self._get_agent_by_type_and_host(
context, agent['agent_type'], agent['host'])
res['heartbeat_timestamp'] = current_time
if agent.get('start_flag'):
res['started_at'] = current_time
greenthread.sleep(0)
agent_db.update(res)
except ext_agent.AgentNotFoundByTypeHost:
greenthread.sleep(0)
res['created_at'] = current_time
res['started_at'] = current_time
res['heartbeat_timestamp'] = current_time
res['admin_state_up'] = True
agent_db = Agent(**res)
greenthread.sleep(0)
context.session.add(agent_db)
greenthread.sleep(0)
def create_or_update_agent(self, context, agent):
"""Create or update agent according to report."""
try:
return self._create_or_update_agent(context, agent)
except db_exc.DBDuplicateEntry as e:
with excutils.save_and_reraise_exception() as ctxt:
if e.columns == ['agent_type', 'host']:
# It might happen that two or more concurrent transactions
# are trying to insert new rows having the same value of
# (agent_type, host) pair at the same time (if there has
# been no such entry in the table and multiple agent status
# updates are being processed at the moment). In this case
# having a unique constraint on (agent_type, host) columns
# guarantees that only one transaction will succeed and
# insert a new agent entry, others will fail and be rolled
# back. That means we must retry them one more time: no
# INSERTs will be issued, because
# _get_agent_by_type_and_host() will return the existing
# agent entry, which will be updated multiple times
ctxt.reraise = False
return self._create_or_update_agent(context, agent)
class AgentExtRpcCallback(object):
"""Processes the rpc report in plugin implementations."""
RPC_API_VERSION = '1.0'
START_TIME = timeutils.utcnow()
def __init__(self, plugin=None):
self.plugin = plugin
def report_state(self, context, **kwargs):
"""Report state from agent to server."""
time = kwargs['time']
time = timeutils.parse_strtime(time)
if self.START_TIME > time:
LOG.debug(_("Message with invalid timestamp received"))
return
agent_state = kwargs['agent_state']['agent_state']
if not self.plugin:
self.plugin = manager.NeutronManager.get_plugin()
self.plugin.create_or_update_agent(context, agent_state)
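# Illustrative sketch (not part of the original module): the rough shape of the
# keyword arguments report_state() receives from an agent. All values below are
# hypothetical; the inner 'configurations' dict is what ends up in
# Agent.configurations via create_or_update_agent().
_EXAMPLE_REPORT_STATE_KWARGS = {
    'agent_state': {
        'agent_state': {
            'agent_type': 'Open vSwitch agent',
            'binary': 'neutron-openvswitch-agent',
            'host': 'compute-1',
            'topic': 'N/A',
            'start_flag': True,
            'configurations': {'tunnel_types': ['vxlan']},
        },
    },
    'time': '2014-01-01T00:00:00.000000',
}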
|
|
#!/usr/bin/env python
"""Pathspecs are methods of specifying the path on the client.
The GRR client has a number of drivers to virtualize access to different objects
to create a Virtual File System (VFS) abstraction. These are called 'VFS
Handlers' and they provide typical file-like operations (e.g. read, seek, tell
and stat). It is possible to recursively apply different drivers in the correct
order to arrive at a certain file-like object. In order to specify how drivers
should be applied we use 'Path Specifications' or pathspec.
Each VFS handler is constructed from a previous handler and a pathspec. The
pathspec is just a collection of arguments which make sense to the specific VFS
handler. The type of the handler is carried by the pathtype parameter.
On the server the PathSpec is represented as a PathSpec object, and stored
as an attribute of the AFF4 object. This module defines this abstraction.
"""
import fnmatch
import itertools
import posixpath
import re
import logging
from grr.lib import artifact_utils
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import standard as rdf_standard
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import jobs_pb2
INTERPOLATED_REGEX = re.compile(r"%%([^%]+?)%%")
# Grouping pattern: e.g. {test.exe,foo.doc,bar.txt}
GROUPING_PATTERN = re.compile("{([^}]+,[^}]+)}")
class PathSpec(rdf_structs.RDFProtoStruct):
"""A path specification.
The pathspec protobuf is a recursive protobuf which contains components. This
class makes it easier to manipulate these structures by providing useful
helpers.
"""
protobuf = jobs_pb2.PathSpec
def __init__(self, initializer=None, age=None, **kwargs):
super(PathSpec, self).__init__(age=age, **kwargs)
# Instantiate from another PathSpec.
if isinstance(initializer, PathSpec):
# pylint: disable=protected-access
self.SetRawData(initializer._CopyRawData())
# pylint: enable=protected-access
self.age = initializer.age
# Allow initialization from a list of protobufs each representing a
# component.
elif isinstance(initializer, list):
for element in initializer:
self.last.SetRawData(element.GetRawData())
# Or we can initialize from a string.
elif isinstance(initializer, str):
self.ParseFromString(initializer)
# Legacy protocol buffer implementation.
elif isinstance(initializer, self.protobuf):
self.ParseFromString(initializer.SerializeToString())
elif initializer is not None:
raise rdfvalue.InitializeError("Unable to initialize")
def __len__(self):
"""Return the total number of path components."""
i = -1
for i, _ in enumerate(self):
pass
return i + 1
def __getitem__(self, item):
for i, element in enumerate(self):
if i == item:
return element
raise IndexError("Pathspec index (%s) out of range" % item)
def __iter__(self):
"""Only iterate over all components from the current pointer."""
element = self
while element.HasField("pathtype"):
yield element
if element.HasField("nested_path"):
element = element.nested_path
else:
break
def Insert(self, index, rdfpathspec=None, **kwarg):
"""Insert a single component at index."""
if rdfpathspec is None:
rdfpathspec = self.__class__(**kwarg)
if index == 0:
# Copy ourselves to a temp copy.
nested_proto = self.__class__()
nested_proto.SetRawData(self.GetRawData())
# Replace ourselves with the new object.
self.SetRawData(rdfpathspec.GetRawData())
# Append the temp copy to the end.
self.last.nested_path = nested_proto
else:
previous = self[index - 1]
rdfpathspec.last.nested_path = previous.nested_path
previous.nested_path = rdfpathspec
def Append(self, component=None, **kwarg):
"""Append a new pathspec component to this pathspec."""
if component is None:
component = self.__class__(**kwarg)
if self.HasField("pathtype"):
self.last.nested_path = component
else:
for k, v in kwarg.items():
setattr(self, k, v)
self.SetRawData(component.GetRawData())
return self
def CollapsePath(self):
return utils.JoinPath(*[x.path for x in self])
def Pop(self, index=0):
"""Removes and returns the pathspec at the specified index."""
if index < 0:
index += len(self)
if index == 0:
result = self.__class__()
result.SetRawData(self.GetRawData())
self.SetRawData(self.nested_path.GetRawData())
else:
# Get the raw protobufs for the previous member.
previous = self[index - 1]
result = previous.nested_path
      # Manipulate the previous member's protobuf to patch the next component in.
previous.nested_path = result.nested_path
result.nested_path = None
return result
@property
def first(self):
return self
@property
def last(self):
if self.HasField("pathtype") and self.pathtype != self.PathType.UNSET:
return list(self)[-1]
return self
def Dirname(self):
"""Get a new copied object with only the directory path."""
result = self.Copy()
while 1:
last_directory = posixpath.dirname(result.last.path)
if last_directory != "/" or len(result) <= 1:
result.last.path = last_directory
# Make sure to clear the inode information.
result.last.inode = None
break
result.Pop(-1)
return result
def Basename(self):
for component in reversed(self):
basename = posixpath.basename(component.path)
if basename:
return basename
return ""
def Validate(self):
if not self.HasField("pathtype") or self.pathtype == self.PathType.UNSET:
raise ValueError("No path type set in PathSpec.")
class GlobExpression(rdfvalue.RDFString):
"""A glob expression for a client path.
A glob expression represents a set of regular expressions which match files on
the client. The Glob expression supports the following expansions:
1) Client attribute expansions are surrounded with %% characters. They will be
expanded from the client AFF4 object.
2) Groupings are collections of alternates. e.g. {foo.exe,bar.sys}
3) Wild cards like * and ?
"""
context_help_url = "user_manual.html#_path_globbing"
RECURSION_REGEX = re.compile(r"\*\*(\d*)")
def Validate(self):
"""GlobExpression is valid."""
if len(self.RECURSION_REGEX.findall(self._value)) > 1:
raise ValueError("Only one ** is permitted per path: %s." %
self._value)
def Interpolate(self, client=None):
try:
kb = client.Get(client.Schema.KNOWLEDGE_BASE)
if not kb:
raise artifact_utils.KnowledgeBaseInterpolationError(
"Client has no knowledge base")
patterns = artifact_utils.InterpolateKbAttributes(self._value, kb)
except artifact_utils.KnowledgeBaseInterpolationError:
# TODO(user): Deprecate InterpolateClientAttributes() support and
# make KnowledgeBase the default and only option as soon as we're
# confident that it's fully populated.
logging.debug("Can't interpolate glob %s with knowledge base attributes, "
"reverting to client attributes.", utils.SmartUnicode(self))
patterns = self.InterpolateClientAttributes(client=client)
for pattern in patterns:
# Normalize the component path (this allows us to resolve ../
# sequences).
pattern = utils.NormalizePath(pattern.replace("\\", "/"))
for pattern in self.InterpolateGrouping(pattern):
yield pattern
def InterpolateClientAttributes(self, client=None, knowledge_base=None):
"""Interpolate all client attributes in pattern.
Args:
client: The client VFSGRRClient object we interpolate parameters from.
knowledge_base: The knowledge_base to interpolate parameters from.
Yields:
All unique strings generated by expanding the pattern.
We can interpolate from a knowledge base object (used in artifacts) or the
raw Client object, but only one at a time.
NOTE: This will eventually be replaced by the Artifact KnowledgeBase
interpolator but we are maintaining it for now.
"""
if client is None:
attr_accessor = knowledge_base.get
else:
attr_accessor = lambda x: client.GetValuesForAttribute(x, only_one=True)
components = []
offset = 0
for match in INTERPOLATED_REGEX.finditer(self._value):
components.append([self._value[offset:match.start()]])
# Expand the attribute into the set of possibilities:
alternatives = []
# Only get the newest attribute that matches the pattern.
for rdf_value in attr_accessor(match.group(1)):
        # Treat string as special because it's an iterable :-(
if isinstance(rdf_value, basestring):
alternatives.append(rdf_value)
else:
for value in rdf_value:
            value = utils.SmartUnicode(value)
if value:
alternatives.append(value)
components.append(set(alternatives))
offset = match.end()
components.append([self._value[offset:]])
# Now calculate the cartesian products of all these sets to form all
# strings.
for vector in itertools.product(*components):
yield "".join(vector)
def InterpolateGrouping(self, pattern):
"""Interpolate inline globbing groups."""
components = []
offset = 0
for match in GROUPING_PATTERN.finditer(pattern):
components.append([pattern[offset:match.start()]])
# Expand the attribute into the set of possibilities:
alternatives = match.group(1).split(",")
components.append(set(alternatives))
offset = match.end()
components.append([pattern[offset:]])
# Now calculate the cartesian products of all these sets to form all
# strings.
for vector in itertools.product(*components):
yield u"".join(vector)
def AsRegEx(self):
"""Return the current glob as a simple regex.
Note: No interpolation is performed.
Returns:
A RegularExpression() object.
"""
return rdf_standard.RegularExpression(
"(?i)^" + fnmatch.translate(self._value))
|
|
#!/usr/bin/env python3
import itertools
import math
from abc import abstractmethod
from itertools import combinations, product
from unittest.mock import MagicMock, patch
import torch
import gpytorch
from .base_test_case import BaseTestCase
def _ensure_symmetric_grad(grad):
"""
A gradient-hook hack to ensure that symmetric matrix gradients are symmetric
"""
res = torch.add(grad, grad.transpose(-1, -2)).mul(0.5)
return res
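# Illustrative sketch (not part of the original test utilities): how the hook above
# is meant to be used -- register it on a symmetric (non-leaf) tensor so that the
# gradient arriving at that tensor is symmetrized before it propagates further.
def _example_symmetric_grad_hook():
    mat = torch.randn(3, 3, requires_grad=True)
    sym = (mat + mat.transpose(-1, -2)).mul(0.5)  # symmetric, differentiable
    sym.register_hook(_ensure_symmetric_grad)
    # Any scalar loss works; the hook symmetrizes d(loss)/d(sym) during backward().
    (sym * torch.randn(3, 3)).sum().backward()
    return mat.grad  # symmetric thanks to the hook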
class RectangularLazyTensorTestCase(BaseTestCase):
@abstractmethod
def create_lazy_tensor(self):
raise NotImplementedError()
@abstractmethod
def evaluate_lazy_tensor(self):
raise NotImplementedError()
def _test_matmul(self, rhs):
lazy_tensor = self.create_lazy_tensor().requires_grad_(True)
lazy_tensor_copy = lazy_tensor.clone().detach_().requires_grad_(True)
evaluated = self.evaluate_lazy_tensor(lazy_tensor_copy)
res = lazy_tensor.matmul(rhs)
actual = evaluated.matmul(rhs)
self.assertAllClose(res, actual)
grad = torch.randn_like(res)
res.backward(gradient=grad)
actual.backward(gradient=grad)
for arg, arg_copy in zip(lazy_tensor.representation(), lazy_tensor_copy.representation()):
if arg_copy.requires_grad and arg_copy.is_leaf and arg_copy.grad is not None:
self.assertAllClose(arg.grad, arg_copy.grad, rtol=1e-3)
def test_add(self):
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
rhs = torch.randn(lazy_tensor.shape)
self.assertAllClose((lazy_tensor + rhs).evaluate(), evaluated + rhs)
rhs = torch.randn(lazy_tensor.matrix_shape)
self.assertAllClose((lazy_tensor + rhs).evaluate(), evaluated + rhs)
rhs = torch.randn(2, *lazy_tensor.shape)
self.assertAllClose((lazy_tensor + rhs).evaluate(), evaluated + rhs)
def test_matmul_vec(self):
lazy_tensor = self.create_lazy_tensor()
rhs = torch.randn(lazy_tensor.size(-1))
# We skip this test if we're dealing with batch LazyTensors
# They shouldn't multiply by a vec
if lazy_tensor.ndimension() > 2:
return
else:
return self._test_matmul(rhs)
def test_matmul_matrix(self):
lazy_tensor = self.create_lazy_tensor()
rhs = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-1), 4)
return self._test_matmul(rhs)
def test_matmul_matrix_broadcast(self):
lazy_tensor = self.create_lazy_tensor()
        # Right hand side has one more batch dimension
batch_shape = torch.Size((3, *lazy_tensor.batch_shape))
rhs = torch.randn(*batch_shape, lazy_tensor.size(-1), 4)
self._test_matmul(rhs)
if lazy_tensor.ndimension() > 2:
            # Right hand side has one fewer batch dimension
batch_shape = torch.Size(lazy_tensor.batch_shape[1:])
rhs = torch.randn(*batch_shape, lazy_tensor.size(-1), 4)
self._test_matmul(rhs)
            # Right hand side has a singleton dimension
batch_shape = torch.Size((*lazy_tensor.batch_shape[:-1], 1))
rhs = torch.randn(*batch_shape, lazy_tensor.size(-1), 4)
self._test_matmul(rhs)
def test_constant_mul(self):
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
self.assertAllClose((lazy_tensor * 5).evaluate(), evaluated * 5)
def test_evaluate(self):
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
self.assertAllClose(lazy_tensor.evaluate(), evaluated)
def test_getitem(self):
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
# Non-batch case
if lazy_tensor.ndimension() == 2:
res = lazy_tensor[1]
actual = evaluated[1]
self.assertAllClose(res, actual)
res = lazy_tensor[0:2].evaluate()
actual = evaluated[0:2]
self.assertAllClose(res, actual)
res = lazy_tensor[:, 0:2].evaluate()
actual = evaluated[:, 0:2]
self.assertAllClose(res, actual)
res = lazy_tensor[0:2, :].evaluate()
actual = evaluated[0:2, :]
self.assertAllClose(res, actual)
res = lazy_tensor[..., 0:2].evaluate()
actual = evaluated[..., 0:2]
self.assertAllClose(res, actual)
res = lazy_tensor[0:2, ...].evaluate()
actual = evaluated[0:2, ...]
self.assertAllClose(res, actual)
res = lazy_tensor[..., 0:2, 2]
actual = evaluated[..., 0:2, 2]
self.assertAllClose(res, actual)
res = lazy_tensor[0:2, ..., 2]
actual = evaluated[0:2, ..., 2]
self.assertAllClose(res, actual)
# Batch case
else:
res = lazy_tensor[1].evaluate()
actual = evaluated[1]
self.assertAllClose(res, actual)
res = lazy_tensor[0:2].evaluate()
actual = evaluated[0:2]
self.assertAllClose(res, actual)
res = lazy_tensor[:, 0:2].evaluate()
actual = evaluated[:, 0:2]
self.assertAllClose(res, actual)
for batch_index in product([1, slice(0, 2, None)], repeat=(lazy_tensor.dim() - 2)):
res = lazy_tensor.__getitem__((*batch_index, slice(0, 1, None), slice(0, 2, None))).evaluate()
actual = evaluated.__getitem__((*batch_index, slice(0, 1, None), slice(0, 2, None)))
self.assertAllClose(res, actual)
res = lazy_tensor.__getitem__((*batch_index, 1, slice(0, 2, None)))
actual = evaluated.__getitem__((*batch_index, 1, slice(0, 2, None)))
self.assertAllClose(res, actual)
res = lazy_tensor.__getitem__((*batch_index, slice(1, None, None), 2))
actual = evaluated.__getitem__((*batch_index, slice(1, None, None), 2))
self.assertAllClose(res, actual)
# Ellipsis
res = lazy_tensor.__getitem__((Ellipsis, slice(1, None, None), 2))
actual = evaluated.__getitem__((Ellipsis, slice(1, None, None), 2))
self.assertAllClose(res, actual)
res = lazy_tensor.__getitem__((slice(1, None, None), Ellipsis, 2))
actual = evaluated.__getitem__((slice(1, None, None), Ellipsis, 2))
self.assertAllClose(res, actual)
def test_getitem_tensor_index(self):
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
# Non-batch case
if lazy_tensor.ndimension() == 2:
index = (torch.tensor([0, 0, 1, 2]), torch.tensor([0, 1, 0, 2]))
res, actual = lazy_tensor[index], evaluated[index]
self.assertAllClose(res, actual)
index = (torch.tensor([0, 0, 1, 2]), slice(None, None, None))
res, actual = gpytorch.delazify(lazy_tensor[index]), evaluated[index]
self.assertAllClose(res, actual)
index = (slice(None, None, None), torch.tensor([0, 0, 1, 2]))
res, actual = gpytorch.delazify(lazy_tensor[index]), evaluated[index]
self.assertAllClose(res, actual)
index = (torch.tensor([0, 0, 1, 2]), Ellipsis)
res, actual = gpytorch.delazify(lazy_tensor[index]), evaluated[index]
self.assertAllClose(res, actual)
index = (Ellipsis, torch.tensor([0, 0, 1, 2]))
res, actual = gpytorch.delazify(lazy_tensor[index]), evaluated[index]
self.assertAllClose(res, actual)
index = (Ellipsis, torch.tensor([0, 0, 1, 2]), torch.tensor([0, 1, 0, 2]))
res, actual = lazy_tensor[index], evaluated[index]
self.assertAllClose(res, actual)
# Batch case
else:
for batch_index in product(
[torch.tensor([0, 1, 1, 0]), slice(None, None, None)], repeat=(lazy_tensor.dim() - 2)
):
index = (*batch_index, torch.tensor([0, 1, 0, 2]), torch.tensor([1, 2, 0, 1]))
res, actual = lazy_tensor[index], evaluated[index]
self.assertAllClose(res, actual)
index = (*batch_index, torch.tensor([0, 1, 0, 2]), slice(None, None, None))
res, actual = gpytorch.delazify(lazy_tensor[index]), evaluated[index]
self.assertAllClose(res, actual)
index = (*batch_index, slice(None, None, None), torch.tensor([0, 1, 2, 1]))
res, actual = gpytorch.delazify(lazy_tensor[index]), evaluated[index]
self.assertAllClose(res, actual)
index = (*batch_index, slice(None, None, None), slice(None, None, None))
res, actual = lazy_tensor[index].evaluate(), evaluated[index]
self.assertAllClose(res, actual)
# Ellipsis
res = lazy_tensor.__getitem__((Ellipsis, torch.tensor([0, 1, 0, 2]), torch.tensor([1, 2, 0, 1])))
actual = evaluated.__getitem__((Ellipsis, torch.tensor([0, 1, 0, 2]), torch.tensor([1, 2, 0, 1])))
self.assertAllClose(res, actual)
res = gpytorch.delazify(
lazy_tensor.__getitem__((torch.tensor([0, 1, 0, 1]), Ellipsis, torch.tensor([1, 2, 0, 1])))
)
actual = evaluated.__getitem__((torch.tensor([0, 1, 0, 1]), Ellipsis, torch.tensor([1, 2, 0, 1])))
self.assertAllClose(res, actual)
def test_permute(self):
lazy_tensor = self.create_lazy_tensor()
if lazy_tensor.dim() >= 4:
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
dims = torch.randperm(lazy_tensor.dim() - 2).tolist()
res = lazy_tensor.permute(*dims, -2, -1).evaluate()
actual = evaluated.permute(*dims, -2, -1)
self.assertAllClose(res, actual)
def test_quad_form_derivative(self):
lazy_tensor = self.create_lazy_tensor().requires_grad_(True)
lazy_tensor_clone = lazy_tensor.clone().detach_().requires_grad_(True)
left_vecs = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-2), 2)
right_vecs = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-1), 2)
deriv_custom = lazy_tensor._quad_form_derivative(left_vecs, right_vecs)
deriv_auto = gpytorch.lazy.LazyTensor._quad_form_derivative(lazy_tensor_clone, left_vecs, right_vecs)
for dc, da in zip(deriv_custom, deriv_auto):
self.assertAllClose(dc, da)
def test_sum(self):
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
self.assertAllClose(lazy_tensor.sum(-1), evaluated.sum(-1))
self.assertAllClose(lazy_tensor.sum(-2), evaluated.sum(-2))
if lazy_tensor.ndimension() > 2:
self.assertAllClose(lazy_tensor.sum(-3).evaluate(), evaluated.sum(-3))
if lazy_tensor.ndimension() > 3:
self.assertAllClose(lazy_tensor.sum(-4).evaluate(), evaluated.sum(-4))
def test_transpose_batch(self):
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
if lazy_tensor.dim() >= 4:
for i, j in combinations(range(lazy_tensor.dim() - 2), 2):
res = lazy_tensor.transpose(i, j).evaluate()
actual = evaluated.transpose(i, j)
self.assertAllClose(res, actual, rtol=1e-4, atol=1e-5)
class LazyTensorTestCase(RectangularLazyTensorTestCase):
should_test_sample = False
skip_slq_tests = False
should_call_cg = True
should_call_lanczos = True
def _test_inv_matmul(self, rhs, lhs=None, cholesky=False):
lazy_tensor = self.create_lazy_tensor().requires_grad_(True)
lazy_tensor_copy = lazy_tensor.clone().detach_().requires_grad_(True)
evaluated = self.evaluate_lazy_tensor(lazy_tensor_copy)
evaluated.register_hook(_ensure_symmetric_grad)
# Create a test right hand side and left hand side
rhs.requires_grad_(True)
rhs_copy = rhs.clone().detach().requires_grad_(True)
if lhs is not None:
lhs.requires_grad_(True)
lhs_copy = lhs.clone().detach().requires_grad_(True)
_wrapped_cg = MagicMock(wraps=gpytorch.utils.linear_cg)
with patch("gpytorch.utils.linear_cg", new=_wrapped_cg) as linear_cg_mock:
with gpytorch.settings.max_cholesky_size(math.inf if cholesky else 0), gpytorch.settings.cg_tolerance(1e-4):
# Perform the inv_matmul
if lhs is not None:
res = lazy_tensor.inv_matmul(rhs, lhs)
actual = lhs_copy @ evaluated.inverse() @ rhs_copy
else:
res = lazy_tensor.inv_matmul(rhs)
actual = evaluated.inverse().matmul(rhs_copy)
self.assertAllClose(res, actual, rtol=0.02, atol=1e-5)
# Perform backward pass
grad = torch.randn_like(res)
res.backward(gradient=grad)
actual.backward(gradient=grad)
for arg, arg_copy in zip(lazy_tensor.representation(), lazy_tensor_copy.representation()):
if arg_copy.requires_grad and arg_copy.is_leaf and arg_copy.grad is not None:
self.assertAllClose(arg.grad, arg_copy.grad, rtol=0.03, atol=1e-5)
self.assertAllClose(rhs.grad, rhs_copy.grad, rtol=0.03, atol=1e-5)
if lhs is not None:
self.assertAllClose(lhs.grad, lhs_copy.grad, rtol=0.03, atol=1e-5)
# Determine if we've called CG or not
if not cholesky and self.__class__.should_call_cg:
self.assertTrue(linear_cg_mock.called)
else:
self.assertFalse(linear_cg_mock.called)
def _test_inv_quad_logdet(self, reduce_inv_quad=True, cholesky=False):
if not self.__class__.skip_slq_tests:
# Forward
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
flattened_evaluated = evaluated.view(-1, *lazy_tensor.matrix_shape)
vecs = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-1), 3, requires_grad=True)
vecs_copy = vecs.clone().detach_().requires_grad_(True)
_wrapped_cg = MagicMock(wraps=gpytorch.utils.linear_cg)
with patch("gpytorch.utils.linear_cg", new=_wrapped_cg) as linear_cg_mock:
with gpytorch.settings.num_trace_samples(256), gpytorch.settings.max_cholesky_size(
math.inf if cholesky else 0
), gpytorch.settings.cg_tolerance(1e-5):
res_inv_quad, res_logdet = lazy_tensor.inv_quad_logdet(
inv_quad_rhs=vecs, logdet=True, reduce_inv_quad=reduce_inv_quad
)
actual_inv_quad = evaluated.inverse().matmul(vecs_copy).mul(vecs_copy).sum(-2)
if reduce_inv_quad:
actual_inv_quad = actual_inv_quad.sum(-1)
actual_logdet = torch.cat(
[torch.logdet(flattened_evaluated[i]).unsqueeze(0) for i in range(lazy_tensor.batch_shape.numel())]
).view(lazy_tensor.batch_shape)
self.assertAllClose(res_inv_quad, actual_inv_quad, rtol=0.01, atol=0.01)
self.assertAllClose(res_logdet, actual_logdet, rtol=0.2, atol=0.03)
if not cholesky and self.__class__.should_call_cg:
self.assertTrue(linear_cg_mock.called)
else:
self.assertFalse(linear_cg_mock.called)
def test_add_diag(self):
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
other_diag = torch.tensor(1.5)
res = lazy_tensor.add_diag(other_diag).evaluate()
actual = evaluated + torch.eye(evaluated.size(-1)).view(
*[1 for _ in range(lazy_tensor.dim() - 2)], evaluated.size(-1), evaluated.size(-1)
).repeat(*lazy_tensor.batch_shape, 1, 1).mul(1.5)
self.assertAllClose(res, actual)
other_diag = torch.tensor([1.5])
res = lazy_tensor.add_diag(other_diag).evaluate()
actual = evaluated + torch.eye(evaluated.size(-1)).view(
*[1 for _ in range(lazy_tensor.dim() - 2)], evaluated.size(-1), evaluated.size(-1)
).repeat(*lazy_tensor.batch_shape, 1, 1).mul(1.5)
self.assertAllClose(res, actual)
other_diag = torch.randn(lazy_tensor.size(-1)).pow(2)
res = lazy_tensor.add_diag(other_diag).evaluate()
actual = evaluated + other_diag.diag().repeat(*lazy_tensor.batch_shape, 1, 1)
self.assertAllClose(res, actual)
for sizes in product([1, None], repeat=(lazy_tensor.dim() - 2)):
batch_shape = [lazy_tensor.batch_shape[i] if size is None else size for i, size in enumerate(sizes)]
other_diag = torch.randn(*batch_shape, lazy_tensor.size(-1)).pow(2)
res = lazy_tensor.add_diag(other_diag).evaluate()
actual = evaluated.clone().detach()
for i in range(other_diag.size(-1)):
actual[..., i, i] = actual[..., i, i] + other_diag[..., i]
self.assertAllClose(res, actual, rtol=1e-2, atol=1e-5)
def test_cholesky(self):
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
for upper in (False, True):
res = lazy_tensor.cholesky(upper=upper).evaluate()
actual = torch.cholesky(evaluated, upper=upper)
self.assertAllClose(res, actual, rtol=1e-3, atol=1e-5)
# TODO: Check gradients
def test_diag(self):
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
res = lazy_tensor.diag()
actual = evaluated.diagonal(dim1=-2, dim2=-1)
actual = actual.view(*lazy_tensor.batch_shape, -1)
self.assertAllClose(res, actual, rtol=1e-2, atol=1e-5)
def test_inv_matmul_vector(self, cholesky=False):
lazy_tensor = self.create_lazy_tensor()
rhs = torch.randn(lazy_tensor.size(-1))
# We skip this test if we're dealing with batch LazyTensors
# They shouldn't multiply by a vec
if lazy_tensor.ndimension() > 2:
return
else:
return self._test_inv_matmul(rhs)
def test_inv_matmul_vector_with_left(self, cholesky=False):
lazy_tensor = self.create_lazy_tensor()
rhs = torch.randn(lazy_tensor.size(-1))
lhs = torch.randn(6, lazy_tensor.size(-1))
# We skip this test if we're dealing with batch LazyTensors
# They shouldn't multiply by a vec
if lazy_tensor.ndimension() > 2:
return
else:
return self._test_inv_matmul(rhs, lhs=lhs)
def test_inv_matmul_vector_with_left_cholesky(self):
lazy_tensor = self.create_lazy_tensor()
rhs = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-1), 5)
lhs = torch.randn(*lazy_tensor.batch_shape, 6, lazy_tensor.size(-1))
return self._test_inv_matmul(rhs, lhs=lhs, cholesky=True)
def test_inv_matmul_matrix(self, cholesky=False):
lazy_tensor = self.create_lazy_tensor()
rhs = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-1), 5)
return self._test_inv_matmul(rhs, cholesky=cholesky)
def test_inv_matmul_matrix_cholesky(self):
return self.test_inv_matmul_matrix(cholesky=True)
def test_inv_matmul_matrix_with_left(self):
lazy_tensor = self.create_lazy_tensor()
rhs = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-1), 5)
lhs = torch.randn(*lazy_tensor.batch_shape, 3, lazy_tensor.size(-1))
return self._test_inv_matmul(rhs, lhs=lhs)
def test_inv_matmul_matrix_broadcast(self):
lazy_tensor = self.create_lazy_tensor()
        # Right hand side has one more batch dimension
batch_shape = torch.Size((3, *lazy_tensor.batch_shape))
rhs = torch.randn(*batch_shape, lazy_tensor.size(-1), 5)
self._test_inv_matmul(rhs)
if lazy_tensor.ndimension() > 2:
            # Right hand side has one fewer batch dimension
batch_shape = torch.Size(lazy_tensor.batch_shape[1:])
rhs = torch.randn(*batch_shape, lazy_tensor.size(-1), 5)
self._test_inv_matmul(rhs)
            # Right hand side has a singleton dimension
batch_shape = torch.Size((*lazy_tensor.batch_shape[:-1], 1))
rhs = torch.randn(*batch_shape, lazy_tensor.size(-1), 5)
self._test_inv_matmul(rhs)
def test_inv_quad_logdet(self):
return self._test_inv_quad_logdet(reduce_inv_quad=False, cholesky=False)
def test_inv_quad_logdet_no_reduce(self):
return self._test_inv_quad_logdet(reduce_inv_quad=True, cholesky=False)
def test_inv_quad_logdet_no_reduce_cholesky(self):
return self._test_inv_quad_logdet(reduce_inv_quad=True, cholesky=True)
def test_prod(self):
with gpytorch.settings.fast_computations(covar_root_decomposition=False):
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
if lazy_tensor.ndimension() > 2:
self.assertAllClose(lazy_tensor.prod(-3).evaluate(), evaluated.prod(-3), atol=1e-2, rtol=1e-2)
if lazy_tensor.ndimension() > 3:
self.assertAllClose(lazy_tensor.prod(-4).evaluate(), evaluated.prod(-4), atol=1e-2, rtol=1e-2)
def test_root_decomposition(self, cholesky=False):
_wrapped_lanczos = MagicMock(wraps=gpytorch.utils.lanczos.lanczos_tridiag)
with patch("gpytorch.utils.lanczos.lanczos_tridiag", new=_wrapped_lanczos) as lanczos_mock:
lazy_tensor = self.create_lazy_tensor()
test_mat = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-1), 5)
with gpytorch.settings.max_cholesky_size(math.inf if cholesky else 0):
root_approx = lazy_tensor.root_decomposition()
res = root_approx.matmul(test_mat)
actual = lazy_tensor.matmul(test_mat)
self.assertAllClose(res, actual, rtol=0.05)
# Make sure that we're calling the correct function
if not cholesky and self.__class__.should_call_lanczos:
self.assertTrue(lanczos_mock.called)
else:
self.assertFalse(lanczos_mock.called)
def test_root_decomposition_cholesky(self):
return self.test_root_decomposition(cholesky=True)
def test_root_inv_decomposition(self):
lazy_tensor = self.create_lazy_tensor()
root_approx = lazy_tensor.root_inv_decomposition()
test_mat = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-1), 5)
res = root_approx.matmul(test_mat)
actual = lazy_tensor.inv_matmul(test_mat)
self.assertAllClose(res, actual, rtol=0.05, atol=0.02)
def test_sample(self):
if self.__class__.should_test_sample:
lazy_tensor = self.create_lazy_tensor()
evaluated = self.evaluate_lazy_tensor(lazy_tensor)
samples = lazy_tensor.zero_mean_mvn_samples(50000)
sample_covar = samples.unsqueeze(-1).matmul(samples.unsqueeze(-2)).mean(0)
self.assertAllClose(sample_covar, evaluated, rtol=0.3, atol=0.3)
def test_sqrt_inv_matmul(self):
lazy_tensor = self.create_lazy_tensor().requires_grad_(True)
if len(lazy_tensor.batch_shape):
return
lazy_tensor_copy = lazy_tensor.clone().detach_().requires_grad_(True)
evaluated = self.evaluate_lazy_tensor(lazy_tensor_copy)
evaluated.register_hook(_ensure_symmetric_grad)
# Create a test right hand side and left hand side
rhs = torch.randn(*lazy_tensor.shape[:-1], 3).requires_grad_(True)
lhs = torch.randn(*lazy_tensor.shape[:-2], 2, lazy_tensor.size(-1)).requires_grad_(True)
rhs_copy = rhs.clone().detach().requires_grad_(True)
lhs_copy = lhs.clone().detach().requires_grad_(True)
# Perform forward pass
with gpytorch.settings.max_cg_iterations(200):
sqrt_inv_matmul_res, inv_quad_res = lazy_tensor.sqrt_inv_matmul(rhs, lhs)
evals, evecs = evaluated.symeig(eigenvectors=True)
matrix_inv_root = evecs @ (evals.sqrt().reciprocal().unsqueeze(-1) * evecs.transpose(-1, -2))
sqrt_inv_matmul_actual = lhs_copy @ matrix_inv_root @ rhs_copy
inv_quad_actual = (lhs_copy @ matrix_inv_root).pow(2).sum(dim=-1)
# Check forward pass
self.assertAllClose(sqrt_inv_matmul_res, sqrt_inv_matmul_actual, rtol=1e-4, atol=1e-3)
self.assertAllClose(inv_quad_res, inv_quad_actual, rtol=1e-4, atol=1e-3)
# Perform backward pass
sqrt_inv_matmul_grad = torch.randn_like(sqrt_inv_matmul_res)
inv_quad_grad = torch.randn_like(inv_quad_res)
((sqrt_inv_matmul_res * sqrt_inv_matmul_grad).sum() + (inv_quad_res * inv_quad_grad).sum()).backward()
((sqrt_inv_matmul_actual * sqrt_inv_matmul_grad).sum() + (inv_quad_actual * inv_quad_grad).sum()).backward()
# Check grads
self.assertAllClose(rhs.grad, rhs_copy.grad, rtol=1e-4, atol=1e-3)
self.assertAllClose(lhs.grad, lhs_copy.grad, rtol=1e-4, atol=1e-3)
for arg, arg_copy in zip(lazy_tensor.representation(), lazy_tensor_copy.representation()):
if arg_copy.requires_grad and arg_copy.is_leaf and arg_copy.grad is not None:
self.assertAllClose(arg.grad, arg_copy.grad, rtol=1e-4, atol=1e-3)
def test_sqrt_inv_matmul_no_lhs(self):
lazy_tensor = self.create_lazy_tensor().requires_grad_(True)
if len(lazy_tensor.batch_shape):
return
lazy_tensor_copy = lazy_tensor.clone().detach_().requires_grad_(True)
evaluated = self.evaluate_lazy_tensor(lazy_tensor_copy)
evaluated.register_hook(_ensure_symmetric_grad)
# Create a test right hand side and left hand side
rhs = torch.randn(*lazy_tensor.shape[:-1], 3).requires_grad_(True)
rhs_copy = rhs.clone().detach().requires_grad_(True)
# Perform forward pass
with gpytorch.settings.max_cg_iterations(200):
sqrt_inv_matmul_res = lazy_tensor.sqrt_inv_matmul(rhs)
evals, evecs = evaluated.symeig(eigenvectors=True)
matrix_inv_root = evecs @ (evals.sqrt().reciprocal().unsqueeze(-1) * evecs.transpose(-1, -2))
sqrt_inv_matmul_actual = matrix_inv_root @ rhs_copy
# Check forward pass
self.assertAllClose(sqrt_inv_matmul_res, sqrt_inv_matmul_actual, rtol=1e-4, atol=1e-3)
# Perform backward pass
sqrt_inv_matmul_grad = torch.randn_like(sqrt_inv_matmul_res)
((sqrt_inv_matmul_res * sqrt_inv_matmul_grad).sum()).backward()
((sqrt_inv_matmul_actual * sqrt_inv_matmul_grad).sum()).backward()
# Check grads
self.assertAllClose(rhs.grad, rhs_copy.grad, rtol=1e-4, atol=1e-3)
for arg, arg_copy in zip(lazy_tensor.representation(), lazy_tensor_copy.representation()):
if arg_copy.requires_grad and arg_copy.is_leaf and arg_copy.grad is not None:
self.assertAllClose(arg.grad, arg_copy.grad, rtol=1e-4, atol=1e-3)
def test_symeig(self):
lazy_tensor = self.create_lazy_tensor().requires_grad_(True)
lazy_tensor_copy = lazy_tensor.clone().detach_().requires_grad_(True)
evaluated = self.evaluate_lazy_tensor(lazy_tensor_copy)
# Perform forward pass
evals_unsorted, evecs_unsorted = lazy_tensor.symeig(eigenvectors=True)
evecs_unsorted = evecs_unsorted.evaluate()
# since LazyTensor.symeig does not sort evals, we do this here for the check
evals, idxr = torch.sort(evals_unsorted, dim=-1, descending=False)
evecs = torch.gather(evecs_unsorted, dim=-1, index=idxr.unsqueeze(-2).expand(evecs_unsorted.shape))
evals_actual, evecs_actual = torch.symeig(evaluated.double(), eigenvectors=True)
evals_actual = evals_actual.to(dtype=evaluated.dtype)
evecs_actual = evecs_actual.to(dtype=evaluated.dtype)
# Check forward pass
self.assertAllClose(evals, evals_actual, rtol=1e-4, atol=1e-3)
lt_from_eigendecomp = evecs @ torch.diag_embed(evals) @ evecs.transpose(-1, -2)
self.assertAllClose(lt_from_eigendecomp, evaluated, rtol=1e-4, atol=1e-3)
# if there are repeated evals, we'll skip checking the eigenvectors for those
any_evals_repeated = False
evecs_abs, evecs_actual_abs = evecs.abs(), evecs_actual.abs()
for idx in itertools.product(*[range(b) for b in evals_actual.shape[:-1]]):
eval_i = evals_actual[idx]
if torch.unique(eval_i.detach()).shape[-1] == eval_i.shape[-1]: # detach to avoid pytorch/pytorch#41389
self.assertAllClose(evecs_abs[idx], evecs_actual_abs[idx], rtol=1e-4, atol=1e-3)
else:
any_evals_repeated = True
# Perform backward pass
symeig_grad = torch.randn_like(evals)
((evals * symeig_grad).sum()).backward()
((evals_actual * symeig_grad).sum()).backward()
# Check grads if there were no repeated evals
if not any_evals_repeated:
for arg, arg_copy in zip(lazy_tensor.representation(), lazy_tensor_copy.representation()):
if arg_copy.requires_grad and arg_copy.is_leaf and arg_copy.grad is not None:
self.assertAllClose(arg.grad, arg_copy.grad, rtol=1e-4, atol=1e-3)
# Test with eigenvectors=False
_, evecs = lazy_tensor.symeig(eigenvectors=False)
self.assertIsNone(evecs)
def test_svd(self):
lazy_tensor = self.create_lazy_tensor().requires_grad_(True)
lazy_tensor_copy = lazy_tensor.clone().detach_().requires_grad_(True)
evaluated = self.evaluate_lazy_tensor(lazy_tensor_copy)
# Perform forward pass
U_unsorted, S_unsorted, V_unsorted = lazy_tensor.svd()
U_unsorted, V_unsorted = U_unsorted.evaluate(), V_unsorted.evaluate()
# since LazyTensor.svd does not sort the singular values, we do this here for the check
S, idxr = torch.sort(S_unsorted, dim=-1, descending=True)
idxr = idxr.unsqueeze(-2).expand(U_unsorted.shape)
U = torch.gather(U_unsorted, dim=-1, index=idxr)
V = torch.gather(V_unsorted, dim=-1, index=idxr)
# compute expected result from full tensor
U_actual, S_actual, V_actual = torch.svd(evaluated.double())
U_actual = U_actual.to(dtype=evaluated.dtype)
S_actual = S_actual.to(dtype=evaluated.dtype)
V_actual = V_actual.to(dtype=evaluated.dtype)
# Check forward pass
self.assertAllClose(S, S_actual, rtol=1e-4, atol=1e-3)
lt_from_svd = U @ torch.diag_embed(S) @ V.transpose(-1, -2)
self.assertAllClose(lt_from_svd, evaluated, rtol=1e-4, atol=1e-3)
# if there are repeated singular values, we'll skip checking the singular vectors
U_abs, U_actual_abs = U.abs(), U_actual.abs()
V_abs, V_actual_abs = V.abs(), V_actual.abs()
any_svals_repeated = False
for idx in itertools.product(*[range(b) for b in S_actual.shape[:-1]]):
Si = S_actual[idx]
if torch.unique(Si.detach()).shape[-1] == Si.shape[-1]: # detach to avoid pytorch/pytorch#41389
self.assertAllClose(U_abs[idx], U_actual_abs[idx], rtol=1e-4, atol=1e-3)
self.assertAllClose(V_abs[idx], V_actual_abs[idx], rtol=1e-4, atol=1e-3)
else:
any_svals_repeated = True
# Perform backward pass
svd_grad = torch.randn_like(S)
((S * svd_grad).sum()).backward()
((S_actual * svd_grad).sum()).backward()
# Check grads if there were no repeated singular values
if not any_svals_repeated:
for arg, arg_copy in zip(lazy_tensor.representation(), lazy_tensor_copy.representation()):
if arg_copy.requires_grad and arg_copy.is_leaf and arg_copy.grad is not None:
self.assertAllClose(arg.grad, arg_copy.grad, rtol=1e-4, atol=1e-3)
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import collections
import os
import sys
from pyflink.java_gateway import get_gateway
from pyflink.table.descriptors import (FileSystem, OldCsv, Rowtime, Schema, Kafka,
Elasticsearch, Csv, Avro, Json, CustomConnectorDescriptor,
CustomFormatDescriptor, HBase)
from pyflink.table.table_schema import TableSchema
from pyflink.table.types import DataTypes
from pyflink.testing.test_case_utils import (PyFlinkTestCase, PyFlinkStreamTableTestCase,
PyFlinkBatchTableTestCase,
_load_specific_flink_module_jars)
class FileSystemDescriptorTests(PyFlinkTestCase):
def test_path(self):
file_system = FileSystem()
file_system = file_system.path("/test.csv")
properties = file_system.to_properties()
expected = {'connector.property-version': '1',
'connector.type': 'filesystem',
'connector.path': '/test.csv'}
self.assertEqual(expected, properties)
class KafkaDescriptorTests(PyFlinkTestCase):
@classmethod
def setUpClass(cls):
super(KafkaDescriptorTests, cls).setUpClass()
cls._cxt_clz_loader = get_gateway().jvm.Thread.currentThread().getContextClassLoader()
_load_specific_flink_module_jars('/flink-connectors/flink-connector-kafka')
def test_version(self):
kafka = Kafka().version("0.11")
properties = kafka.to_properties()
expected = {'connector.version': '0.11',
'connector.type': 'kafka',
'connector.startup-mode': 'group-offsets',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_topic(self):
kafka = Kafka().topic("topic1")
properties = kafka.to_properties()
expected = {'connector.type': 'kafka',
'connector.topic': 'topic1',
'connector.startup-mode': 'group-offsets',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_properties(self):
kafka = Kafka().properties({"bootstrap.servers": "localhost:9092"})
properties = kafka.to_properties()
expected = {'connector.type': 'kafka',
'connector.startup-mode': 'group-offsets',
'connector.properties.bootstrap.servers': 'localhost:9092',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_property(self):
kafka = Kafka().property("group.id", "testGroup")
properties = kafka.to_properties()
expected = {'connector.type': 'kafka',
'connector.startup-mode': 'group-offsets',
'connector.properties.group.id': 'testGroup',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_start_from_earliest(self):
kafka = Kafka().start_from_earliest()
properties = kafka.to_properties()
expected = {'connector.type': 'kafka',
'connector.startup-mode': 'earliest-offset',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_start_from_latest(self):
kafka = Kafka().start_from_latest()
properties = kafka.to_properties()
expected = {'connector.type': 'kafka',
'connector.startup-mode': 'latest-offset',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_start_from_group_offsets(self):
kafka = Kafka().start_from_group_offsets()
properties = kafka.to_properties()
expected = {'connector.type': 'kafka',
'connector.startup-mode': 'group-offsets',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_start_from_specific_offsets(self):
kafka = Kafka().start_from_specific_offsets({1: 220, 3: 400})
properties = kafka.to_properties()
expected = {'connector.startup-mode': 'specific-offsets',
'connector.specific-offsets': 'partition:1,offset:220;partition:3,offset:400',
'connector.type': 'kafka',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_start_from_specific_offset(self):
kafka = Kafka().start_from_specific_offset(3, 300)
properties = kafka.to_properties()
expected = {'connector.startup-mode': 'specific-offsets',
'connector.specific-offsets': 'partition:3,offset:300',
'connector.type': 'kafka',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_sink_partitioner_fixed(self):
kafka = Kafka().sink_partitioner_fixed()
properties = kafka.to_properties()
expected = {'connector.sink-partitioner': 'fixed',
'connector.startup-mode': 'group-offsets',
'connector.type': 'kafka',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_sink_partitioner_custom(self):
kafka = Kafka().sink_partitioner_custom(
"org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner")
properties = kafka.to_properties()
expected = {'connector.sink-partitioner': 'custom',
'connector.sink-partitioner-class':
'org.apache.flink.streaming.connectors.kafka.partitioner.'
'FlinkFixedPartitioner',
'connector.type': 'kafka',
'connector.startup-mode': 'group-offsets',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_sink_partitioner_round_robin(self):
kafka = Kafka().sink_partitioner_round_robin()
properties = kafka.to_properties()
expected = {'connector.sink-partitioner': 'round-robin',
'connector.type': 'kafka',
'connector.startup-mode': 'group-offsets',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
@classmethod
def tearDownClass(cls):
if cls._cxt_clz_loader is not None:
get_gateway().jvm.Thread.currentThread().setContextClassLoader(cls._cxt_clz_loader)
class ElasticsearchDescriptorTest(PyFlinkTestCase):
@classmethod
def setUpClass(cls):
super(ElasticsearchDescriptorTest, cls).setUpClass()
cls._cxt_clz_loader = get_gateway().jvm.Thread.currentThread().getContextClassLoader()
_load_specific_flink_module_jars('/flink-connectors/flink-connector-elasticsearch-base')
def test_version(self):
elasticsearch = Elasticsearch().version("6")
properties = elasticsearch.to_properties()
expected = {'connector.type': 'elasticsearch',
'connector.version': '6',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_host(self):
elasticsearch = Elasticsearch().host("localhost", 9200, "http")
properties = elasticsearch.to_properties()
expected = {'connector.hosts': 'http://localhost:9200',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_index(self):
elasticsearch = Elasticsearch().index("MyUsers")
properties = elasticsearch.to_properties()
expected = {'connector.index': 'MyUsers',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_document_type(self):
elasticsearch = Elasticsearch().document_type("user")
properties = elasticsearch.to_properties()
expected = {'connector.document-type': 'user',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_key_delimiter(self):
elasticsearch = Elasticsearch().key_delimiter("$")
properties = elasticsearch.to_properties()
expected = {'connector.key-delimiter': '$',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_key_null_literal(self):
elasticsearch = Elasticsearch().key_null_literal("n/a")
properties = elasticsearch.to_properties()
expected = {'connector.key-null-literal': 'n/a',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_failure_handler_fail(self):
elasticsearch = Elasticsearch().failure_handler_fail()
properties = elasticsearch.to_properties()
expected = {'connector.failure-handler': 'fail',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_failure_handler_ignore(self):
elasticsearch = Elasticsearch().failure_handler_ignore()
properties = elasticsearch.to_properties()
expected = {'connector.failure-handler': 'ignore',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_failure_handler_retry_rejected(self):
elasticsearch = Elasticsearch().failure_handler_retry_rejected()
properties = elasticsearch.to_properties()
expected = {'connector.failure-handler': 'retry-rejected',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_failure_handler_custom(self):
elasticsearch = Elasticsearch().failure_handler_custom(
"org.apache.flink.streaming.connectors.elasticsearch.util.IgnoringFailureHandler")
properties = elasticsearch.to_properties()
expected = {'connector.failure-handler': 'custom',
'connector.failure-handler-class':
'org.apache.flink.streaming.connectors.elasticsearch.util.'
'IgnoringFailureHandler',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_disable_flush_on_checkpoint(self):
elasticsearch = Elasticsearch().disable_flush_on_checkpoint()
properties = elasticsearch.to_properties()
expected = {'connector.flush-on-checkpoint': 'false',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_bulk_flush_max_actions(self):
elasticsearch = Elasticsearch().bulk_flush_max_actions(42)
properties = elasticsearch.to_properties()
expected = {'connector.bulk-flush.max-actions': '42',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_bulk_flush_max_size(self):
elasticsearch = Elasticsearch().bulk_flush_max_size("42 mb")
properties = elasticsearch.to_properties()
expected = {'connector.bulk-flush.max-size': '42 mb',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_bulk_flush_interval(self):
elasticsearch = Elasticsearch().bulk_flush_interval(2000)
properties = elasticsearch.to_properties()
expected = {'connector.bulk-flush.interval': '2000',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_bulk_flush_backoff_exponential(self):
elasticsearch = Elasticsearch().bulk_flush_backoff_exponential()
properties = elasticsearch.to_properties()
expected = {'connector.bulk-flush.backoff.type': 'exponential',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_bulk_flush_backoff_constant(self):
elasticsearch = Elasticsearch().bulk_flush_backoff_constant()
properties = elasticsearch.to_properties()
expected = {'connector.bulk-flush.backoff.type': 'constant',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_bulk_flush_backoff_max_retries(self):
elasticsearch = Elasticsearch().bulk_flush_backoff_max_retries(3)
properties = elasticsearch.to_properties()
expected = {'connector.bulk-flush.backoff.max-retries': '3',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_bulk_flush_backoff_delay(self):
elasticsearch = Elasticsearch().bulk_flush_backoff_delay(30000)
properties = elasticsearch.to_properties()
expected = {'connector.bulk-flush.backoff.delay': '30000',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_connection_max_retry_timeout(self):
elasticsearch = Elasticsearch().connection_max_retry_timeout(3000)
properties = elasticsearch.to_properties()
expected = {'connector.connection-max-retry-timeout': '3000',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_connection_path_prefix(self):
elasticsearch = Elasticsearch().connection_path_prefix("/v1")
properties = elasticsearch.to_properties()
expected = {'connector.connection-path-prefix': '/v1',
'connector.type': 'elasticsearch',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
@classmethod
def tearDownClass(cls):
if cls._cxt_clz_loader is not None:
get_gateway().jvm.Thread.currentThread().setContextClassLoader(cls._cxt_clz_loader)
class CustomConnectorDescriptorTests(PyFlinkTestCase):
def test_custom_connector(self):
custom_connector = CustomConnectorDescriptor('kafka', 1, True) \
.property('connector.topic', 'topic1')\
.properties({'connector.version': '0.11', 'connector.startup-mode': 'earliest-offset'})
properties = custom_connector.to_properties()
expected = {'connector.type': 'kafka',
'connector.property-version': '1',
'connector.topic': 'topic1',
'connector.version': '0.11',
'connector.startup-mode': 'earliest-offset'}
self.assertEqual(expected, properties)
class HBaseDescriptorTests(PyFlinkTestCase):
@classmethod
def setUpClass(cls):
super(HBaseDescriptorTests, cls).setUpClass()
cls._cxt_clz_loader = get_gateway().jvm.Thread.currentThread().getContextClassLoader()
_load_specific_flink_module_jars('/flink-connectors/flink-connector-hbase-base')
def test_version(self):
hbase = HBase().version("1.4.3")
properties = hbase.to_properties()
expected = {'connector.version': '1.4.3',
'connector.type': 'hbase',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
hbase = HBase().version(1.1)
properties = hbase.to_properties()
expected = {'connector.version': '1.1',
'connector.type': 'hbase',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_table_name(self):
hbase = HBase().table_name('tableName1')
properties = hbase.to_properties()
expected = {'connector.type': 'hbase',
'connector.table-name': 'tableName1',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_zookeeper_quorum(self):
hbase = HBase().zookeeper_quorum("localhost:2181,localhost:2182")
properties = hbase.to_properties()
expected = {'connector.type': 'hbase',
'connector.zookeeper.quorum': 'localhost:2181,localhost:2182',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_zookeeper_node_parent(self):
hbase = HBase().zookeeper_node_parent('/hbase/example-root-znode')
properties = hbase.to_properties()
expected = {'connector.type': 'hbase',
'connector.zookeeper.znode.parent': '/hbase/example-root-znode',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_write_buffer_flush_max_size(self):
hbase = HBase().write_buffer_flush_max_size('1000')
properties = hbase.to_properties()
expected = {'connector.type': 'hbase',
'connector.write.buffer-flush.max-size': '1000 bytes',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
hbase = HBase().write_buffer_flush_max_size(1000)
properties = hbase.to_properties()
self.assertEqual(expected, properties)
hbase = HBase().write_buffer_flush_max_size('10mb')
properties = hbase.to_properties()
expected = {'connector.type': 'hbase',
'connector.write.buffer-flush.max-size': '10 mb',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_write_buffer_flush_max_rows(self):
hbase = HBase().write_buffer_flush_max_rows(10)
properties = hbase.to_properties()
expected = {'connector.type': 'hbase',
'connector.write.buffer-flush.max-rows': '10',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
def test_write_buffer_flush_interval(self):
hbase = HBase().write_buffer_flush_interval('123')
properties = hbase.to_properties()
expected = {'connector.type': 'hbase',
'connector.write.buffer-flush.interval': '123',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
hbase = HBase().write_buffer_flush_interval(123)
properties = hbase.to_properties()
self.assertEqual(expected, properties)
hbase = HBase().write_buffer_flush_interval('123ms')
properties = hbase.to_properties()
expected = {'connector.type': 'hbase',
'connector.write.buffer-flush.interval': '123ms',
'connector.property-version': '1'}
self.assertEqual(expected, properties)
@classmethod
def tearDownClass(cls):
if cls._cxt_clz_loader is not None:
get_gateway().jvm.Thread.currentThread().setContextClassLoader(cls._cxt_clz_loader)
class OldCsvDescriptorTests(PyFlinkTestCase):
def test_field_delimiter(self):
csv = OldCsv().field_delimiter("|")
properties = csv.to_properties()
expected = {'format.field-delimiter': '|',
'format.type': 'csv',
'format.property-version': '1'}
self.assertEqual(expected, properties)
def test_line_delimiter(self):
csv = OldCsv().line_delimiter(";")
expected = {'format.type': 'csv',
'format.property-version': '1',
'format.line-delimiter': ';'}
properties = csv.to_properties()
self.assertEqual(expected, properties)
def test_ignore_parse_errors(self):
csv = OldCsv().ignore_parse_errors()
properties = csv.to_properties()
expected = {'format.ignore-parse-errors': 'true',
'format.type': 'csv',
'format.property-version': '1'}
self.assertEqual(expected, properties)
def test_quote_character(self):
csv = OldCsv().quote_character("*")
properties = csv.to_properties()
expected = {'format.quote-character': '*',
'format.type': 'csv',
'format.property-version': '1'}
self.assertEqual(expected, properties)
def test_comment_prefix(self):
csv = OldCsv().comment_prefix("#")
properties = csv.to_properties()
expected = {'format.comment-prefix': '#',
'format.type': 'csv',
'format.property-version': '1'}
self.assertEqual(expected, properties)
def test_ignore_first_line(self):
csv = OldCsv().ignore_first_line()
properties = csv.to_properties()
expected = {'format.ignore-first-line': 'true',
'format.type': 'csv',
'format.property-version': '1'}
self.assertEqual(expected, properties)
def test_field(self):
csv = OldCsv()
csv.field("a", DataTypes.BIGINT())
csv.field("b", DataTypes.STRING())
csv.field("c", "SQL_TIMESTAMP")
properties = csv.to_properties()
expected = {'format.fields.0.name': 'a',
'format.fields.0.data-type': 'BIGINT',
'format.fields.1.name': 'b',
'format.fields.1.data-type': 'VARCHAR(2147483647)',
'format.fields.2.name': 'c',
'format.fields.2.data-type': 'TIMESTAMP(3)',
'format.type': 'csv',
'format.property-version': '1'}
self.assertEqual(expected, properties)
def test_schema(self):
csv = OldCsv()
schema = TableSchema(["a", "b"], [DataTypes.INT(), DataTypes.STRING()])
csv = csv.schema(schema)
properties = csv.to_properties()
expected = {'format.fields.0.name': 'a',
'format.fields.0.data-type': 'INT',
'format.fields.1.name': 'b',
'format.fields.1.data-type': 'VARCHAR(2147483647)',
'format.type': 'csv',
'format.property-version': '1'}
self.assertEqual(expected, properties)
class CsvDescriptorTests(PyFlinkTestCase):
def test_field_delimiter(self):
csv = Csv().field_delimiter("|")
properties = csv.to_properties()
expected = {'format.field-delimiter': '|',
'format.type': 'csv',
'format.property-version': '1'}
self.assertEqual(expected, properties)
def test_line_delimiter(self):
csv = Csv().line_delimiter(";")
expected = {'format.line-delimiter': ';',
'format.property-version': '1',
'format.type': 'csv'}
properties = csv.to_properties()
self.assertEqual(expected, properties)
def test_quote_character(self):
csv = Csv().quote_character("'")
expected = {'format.quote-character': "'",
'format.property-version': '1',
'format.type': 'csv'}
properties = csv.to_properties()
self.assertEqual(expected, properties)
def test_allow_comments(self):
csv = Csv().allow_comments()
expected = {'format.allow-comments': 'true',
'format.property-version': '1',
'format.type': 'csv'}
properties = csv.to_properties()
self.assertEqual(expected, properties)
def test_ignore_parse_errors(self):
csv = Csv().ignore_parse_errors()
expected = {'format.ignore-parse-errors': 'true',
'format.property-version': '1',
'format.type': 'csv'}
properties = csv.to_properties()
self.assertEqual(expected, properties)
def test_array_element_delimiter(self):
csv = Csv().array_element_delimiter("/")
expected = {'format.array-element-delimiter': '/',
'format.property-version': '1',
'format.type': 'csv'}
properties = csv.to_properties()
self.assertEqual(expected, properties)
def test_escape_character(self):
csv = Csv().escape_character("\\")
expected = {'format.escape-character': '\\',
'format.property-version': '1',
'format.type': 'csv'}
properties = csv.to_properties()
self.assertEqual(expected, properties)
def test_null_literal(self):
csv = Csv().null_literal("null")
expected = {'format.null-literal': 'null',
'format.property-version': '1',
'format.type': 'csv'}
properties = csv.to_properties()
self.assertEqual(expected, properties)
def test_schema(self):
csv = Csv().schema(DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.STRING())]))
expected = {'format.schema': 'ROW<a INT, b VARCHAR>',
'format.property-version': '1',
'format.type': 'csv'}
properties = csv.to_properties()
self.assertEqual(expected, properties)
def test_derive_schema(self):
csv = Csv().derive_schema()
expected = {'format.derive-schema': 'true',
'format.property-version': '1',
'format.type': 'csv'}
properties = csv.to_properties()
self.assertEqual(expected, properties)
class AvroDescriptorTest(PyFlinkTestCase):
def test_record_class(self):
avro = Avro().record_class("org.apache.flink.formats.avro.generated.Address")
expected = {'format.record-class': 'org.apache.flink.formats.avro.generated.Address',
'format.property-version': '1',
'format.type': 'avro'}
properties = avro.to_properties()
self.assertEqual(expected, properties)
def test_avro_schema(self):
avro = Avro().avro_schema(
'{"type":"record",'
'"name":"Address",'
'"namespace":"org.apache.flink.formats.avro.generated",'
'"fields":['
'{"name":"num","type":"int"},'
'{"name":"street","type":"string"},'
'{"name":"city","type":"string"},'
'{"name":"state","type":"string"},'
'{"name":"zip","type":"string"}'
']}')
expected = {'format.avro-schema': '{"type":"record",'
'"name":"Address",'
'"namespace":"org.apache.flink.formats.avro.generated",'
'"fields":['
'{"name":"num","type":"int"},'
'{"name":"street","type":"string"},'
'{"name":"city","type":"string"},'
'{"name":"state","type":"string"},'
'{"name":"zip","type":"string"}'
']}',
'format.property-version': '1',
'format.type': 'avro'}
properties = avro.to_properties()
self.assertEqual(expected, properties)
class JsonDescriptorTests(PyFlinkTestCase):
def test_fail_on_missing_field_true(self):
json = Json().fail_on_missing_field(True)
expected = {'format.fail-on-missing-field': 'true',
'format.property-version': '1',
'format.type': 'json'}
properties = json.to_properties()
self.assertEqual(expected, properties)
def test_ignore_parse_errors(self):
json = Json().ignore_parse_errors(True)
expected = {'format.ignore-parse-errors': 'true',
'format.property-version': '1',
'format.type': 'json'}
properties = json.to_properties()
self.assertEqual(expected, properties)
def test_json_schema(self):
json = Json().json_schema(
"{"
"'title': 'Fruit',"
"'type': 'object',"
"'properties': "
"{"
"'name': {'type': 'string'},"
"'count': {'type': 'integer'},"
"'time': "
"{"
"'description': 'row time',"
"'type': 'string',"
"'format': 'date-time'"
"}"
"},"
"'required': ['name', 'count', 'time']"
"}")
expected = {'format.json-schema':
"{"
"'title': 'Fruit',"
"'type': 'object',"
"'properties': {"
"'name': {'type': 'string'},"
"'count': {'type': 'integer'},"
"'time': {"
"'description': 'row time',"
"'type': 'string',"
"'format': 'date-time'}"
"},"
"'required': ['name', 'count', 'time']}",
'format.property-version': '1',
'format.type': 'json'}
properties = json.to_properties()
self.assertEqual(expected, properties)
def test_schema(self):
json = Json().schema(DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.STRING())]))
expected = {'format.schema': 'ROW<a INT, b VARCHAR>',
'format.property-version': '1',
'format.type': 'json'}
properties = json.to_properties()
self.assertEqual(expected, properties)
def test_derive_schema(self):
json = Json().derive_schema()
expected = {'format.derive-schema': 'true',
'format.property-version': '1',
'format.type': 'json'}
properties = json.to_properties()
self.assertEqual(expected, properties)
class CustomFormatDescriptorTests(PyFlinkTestCase):
def test_custom_format_descriptor(self):
custom_format = CustomFormatDescriptor('json', 1) \
.property('format.schema', 'ROW<a INT, b VARCHAR>') \
.properties({'format.fail-on-missing-field': 'true'})
expected = {'format.fail-on-missing-field': 'true',
'format.schema': 'ROW<a INT, b VARCHAR>',
'format.property-version': '1',
'format.type': 'json'}
properties = custom_format.to_properties()
self.assertEqual(expected, properties)
class RowTimeDescriptorTests(PyFlinkTestCase):
def test_timestamps_from_field(self):
rowtime = Rowtime().timestamps_from_field("rtime")
properties = rowtime.to_properties()
expected = {'rowtime.timestamps.type': 'from-field', 'rowtime.timestamps.from': 'rtime'}
self.assertEqual(expected, properties)
def test_timestamps_from_source(self):
rowtime = Rowtime().timestamps_from_source()
properties = rowtime.to_properties()
expected = {'rowtime.timestamps.type': 'from-source'}
self.assertEqual(expected, properties)
def test_timestamps_from_extractor(self):
rowtime = Rowtime().timestamps_from_extractor(
"org.apache.flink.table.descriptors.RowtimeTest$CustomExtractor")
properties = rowtime.to_properties()
expected = {
'rowtime.timestamps.type': 'custom',
'rowtime.timestamps.class':
'org.apache.flink.table.descriptors.RowtimeTest$CustomExtractor',
'rowtime.timestamps.serialized':
'rO0ABXNyAD5vcmcuYXBhY2hlLmZsaW5rLnRhYmxlLmRlc2NyaXB0b3JzLlJvd3RpbWVUZXN0JEN1c3R'
'vbUV4dHJhY3RvcoaChjMg55xwAgABTAAFZmllbGR0ABJMamF2YS9sYW5nL1N0cmluZzt4cgA-b3JnLm'
'FwYWNoZS5mbGluay50YWJsZS5zb3VyY2VzLnRzZXh0cmFjdG9ycy5UaW1lc3RhbXBFeHRyYWN0b3Jf1'
'Y6piFNsGAIAAHhwdAACdHM'}
self.assertEqual(expected, properties)
def test_watermarks_periodic_ascending(self):
rowtime = Rowtime().watermarks_periodic_ascending()
properties = rowtime.to_properties()
expected = {'rowtime.watermarks.type': 'periodic-ascending'}
self.assertEqual(expected, properties)
def test_watermarks_periodic_bounded(self):
rowtime = Rowtime().watermarks_periodic_bounded(1000)
properties = rowtime.to_properties()
expected = {'rowtime.watermarks.type': 'periodic-bounded',
'rowtime.watermarks.delay': '1000'}
self.assertEqual(expected, properties)
def test_watermarks_from_source(self):
rowtime = Rowtime().watermarks_from_source()
properties = rowtime.to_properties()
expected = {'rowtime.watermarks.type': 'from-source'}
self.assertEqual(expected, properties)
def test_watermarks_from_strategy(self):
rowtime = Rowtime().watermarks_from_strategy(
"org.apache.flink.table.descriptors.RowtimeTest$CustomAssigner")
properties = rowtime.to_properties()
expected = {
'rowtime.watermarks.type': 'custom',
'rowtime.watermarks.class':
'org.apache.flink.table.descriptors.RowtimeTest$CustomAssigner',
'rowtime.watermarks.serialized':
'rO0ABXNyAD1vcmcuYXBhY2hlLmZsaW5rLnRhYmxlLmRlc2NyaXB0b3JzLlJvd3RpbWVUZXN0JEN1c3R'
'vbUFzc2lnbmVyeDcuDvfbu0kCAAB4cgBHb3JnLmFwYWNoZS5mbGluay50YWJsZS5zb3VyY2VzLndtc3'
'RyYXRlZ2llcy5QdW5jdHVhdGVkV2F0ZXJtYXJrQXNzaWduZXKBUc57oaWu9AIAAHhyAD1vcmcuYXBhY'
'2hlLmZsaW5rLnRhYmxlLnNvdXJjZXMud21zdHJhdGVnaWVzLldhdGVybWFya1N0cmF0ZWd53nt-g2OW'
'aT4CAAB4cA'}
self.assertEqual(expected, properties)
class SchemaDescriptorTests(PyFlinkTestCase):
def test_field(self):
schema = Schema()\
.field("int_field", DataTypes.INT())\
.field("long_field", DataTypes.BIGINT())\
.field("string_field", DataTypes.STRING())\
.field("timestamp_field", DataTypes.TIMESTAMP(3))\
.field("time_field", DataTypes.TIME())\
.field("date_field", DataTypes.DATE())\
.field("double_field", DataTypes.DOUBLE())\
.field("float_field", DataTypes.FLOAT())\
.field("byte_field", DataTypes.TINYINT())\
.field("short_field", DataTypes.SMALLINT())\
.field("boolean_field", DataTypes.BOOLEAN())
properties = schema.to_properties()
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'long_field',
'schema.1.data-type': 'BIGINT',
'schema.2.name': 'string_field',
'schema.2.data-type': 'VARCHAR(2147483647)',
'schema.3.name': 'timestamp_field',
'schema.3.data-type': 'TIMESTAMP(3)',
'schema.4.name': 'time_field',
'schema.4.data-type': 'TIME(0)',
'schema.5.name': 'date_field',
'schema.5.data-type': 'DATE',
'schema.6.name': 'double_field',
'schema.6.data-type': 'DOUBLE',
'schema.7.name': 'float_field',
'schema.7.data-type': 'FLOAT',
'schema.8.name': 'byte_field',
'schema.8.data-type': 'TINYINT',
'schema.9.name': 'short_field',
'schema.9.data-type': 'SMALLINT',
'schema.10.name': 'boolean_field',
'schema.10.data-type': 'BOOLEAN'}
self.assertEqual(expected, properties)
def test_fields(self):
fields = collections.OrderedDict([
("int_field", DataTypes.INT()),
("long_field", DataTypes.BIGINT()),
("string_field", DataTypes.STRING()),
("timestamp_field", DataTypes.TIMESTAMP(3)),
("time_field", DataTypes.TIME()),
("date_field", DataTypes.DATE()),
("double_field", DataTypes.DOUBLE()),
("float_field", DataTypes.FLOAT()),
("byte_field", DataTypes.TINYINT()),
("short_field", DataTypes.SMALLINT()),
("boolean_field", DataTypes.BOOLEAN())
])
schema = Schema().fields(fields)
properties = schema.to_properties()
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'long_field',
'schema.1.data-type': 'BIGINT',
'schema.2.name': 'string_field',
'schema.2.data-type': 'VARCHAR(2147483647)',
'schema.3.name': 'timestamp_field',
'schema.3.data-type': 'TIMESTAMP(3)',
'schema.4.name': 'time_field',
'schema.4.data-type': 'TIME(0)',
'schema.5.name': 'date_field',
'schema.5.data-type': 'DATE',
'schema.6.name': 'double_field',
'schema.6.data-type': 'DOUBLE',
'schema.7.name': 'float_field',
'schema.7.data-type': 'FLOAT',
'schema.8.name': 'byte_field',
'schema.8.data-type': 'TINYINT',
'schema.9.name': 'short_field',
'schema.9.data-type': 'SMALLINT',
'schema.10.name': 'boolean_field',
'schema.10.data-type': 'BOOLEAN'}
self.assertEqual(expected, properties)
if sys.version_info[:2] <= (3, 5):
fields = {
"int_field": DataTypes.INT(),
"long_field": DataTypes.BIGINT(),
"string_field": DataTypes.STRING(),
"timestamp_field": DataTypes.TIMESTAMP(3),
"time_field": DataTypes.TIME(),
"date_field": DataTypes.DATE(),
"double_field": DataTypes.DOUBLE(),
"float_field": DataTypes.FLOAT(),
"byte_field": DataTypes.TINYINT(),
"short_field": DataTypes.SMALLINT(),
"boolean_field": DataTypes.BOOLEAN()
}
self.assertRaises(TypeError, Schema().fields, fields)
def test_field_in_string(self):
schema = Schema()\
.field("int_field", 'INT')\
.field("long_field", 'BIGINT')\
.field("string_field", 'VARCHAR')\
.field("timestamp_field", 'SQL_TIMESTAMP')\
.field("time_field", 'SQL_TIME')\
.field("date_field", 'SQL_DATE')\
.field("double_field", 'DOUBLE')\
.field("float_field", 'FLOAT')\
.field("byte_field", 'TINYINT')\
.field("short_field", 'SMALLINT')\
.field("boolean_field", 'BOOLEAN')
properties = schema.to_properties()
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'long_field',
'schema.1.data-type': 'BIGINT',
'schema.2.name': 'string_field',
'schema.2.data-type': 'VARCHAR',
'schema.3.name': 'timestamp_field',
'schema.3.data-type': 'TIMESTAMP(3)',
'schema.4.name': 'time_field',
'schema.4.data-type': 'TIME(0)',
'schema.5.name': 'date_field',
'schema.5.data-type': 'DATE',
'schema.6.name': 'double_field',
'schema.6.data-type': 'DOUBLE',
'schema.7.name': 'float_field',
'schema.7.data-type': 'FLOAT',
'schema.8.name': 'byte_field',
'schema.8.data-type': 'TINYINT',
'schema.9.name': 'short_field',
'schema.9.data-type': 'SMALLINT',
'schema.10.name': 'boolean_field',
'schema.10.data-type': 'BOOLEAN'}
self.assertEqual(expected, properties)
def test_from_origin_field(self):
schema = Schema()\
.field("int_field", DataTypes.INT())\
.field("long_field", DataTypes.BIGINT()).from_origin_field("origin_field_a")\
.field("string_field", DataTypes.STRING())
properties = schema.to_properties()
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'long_field',
'schema.1.data-type': 'BIGINT',
'schema.1.from': 'origin_field_a',
'schema.2.name': 'string_field',
'schema.2.data-type': 'VARCHAR(2147483647)'}
self.assertEqual(expected, properties)
def test_proctime(self):
schema = Schema()\
.field("int_field", DataTypes.INT())\
.field("ptime", DataTypes.BIGINT()).proctime()\
.field("string_field", DataTypes.STRING())
properties = schema.to_properties()
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'ptime',
'schema.1.data-type': 'BIGINT',
'schema.1.proctime': 'true',
'schema.2.name': 'string_field',
'schema.2.data-type': 'VARCHAR(2147483647)'}
self.assertEqual(expected, properties)
def test_rowtime(self):
schema = Schema()\
.field("int_field", DataTypes.INT())\
.field("long_field", DataTypes.BIGINT())\
.field("rtime", DataTypes.BIGINT())\
.rowtime(
Rowtime().timestamps_from_field("long_field").watermarks_periodic_bounded(5000))\
.field("string_field", DataTypes.STRING())
properties = schema.to_properties()
print(properties)
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'long_field',
'schema.1.data-type': 'BIGINT',
'schema.2.name': 'rtime',
'schema.2.data-type': 'BIGINT',
'schema.2.rowtime.timestamps.type': 'from-field',
'schema.2.rowtime.timestamps.from': 'long_field',
'schema.2.rowtime.watermarks.type': 'periodic-bounded',
'schema.2.rowtime.watermarks.delay': '5000',
'schema.3.name': 'string_field',
'schema.3.data-type': 'VARCHAR(2147483647)'}
self.assertEqual(expected, properties)
def test_schema(self):
table_schema = TableSchema(["a", "b"], [DataTypes.INT(), DataTypes.STRING()])
schema = Schema().schema(table_schema)
properties = schema.to_properties()
expected = {'schema.0.name': 'a',
'schema.0.data-type': 'INT',
'schema.1.name': 'b',
'schema.1.data-type': 'VARCHAR(2147483647)'}
self.assertEqual(expected, properties)
class AbstractTableDescriptorTests(object):
def test_with_format(self):
descriptor = self.t_env.connect(FileSystem())
descriptor = descriptor.with_format(OldCsv().field("a", "INT"))
properties = descriptor.to_properties()
expected = {'format.type': 'csv',
'format.property-version': '1',
'format.fields.0.name': 'a',
'format.fields.0.data-type': 'INT',
'connector.property-version': '1',
'connector.type': 'filesystem'}
assert properties == expected
def test_with_schema(self):
descriptor = self.t_env.connect(FileSystem())
descriptor = descriptor.with_format(OldCsv()).with_schema(Schema().field("a", "INT"))
properties = descriptor.to_properties()
expected = {'schema.0.name': 'a',
'schema.0.data-type': 'INT',
'format.type': 'csv',
'format.property-version': '1',
'connector.type': 'filesystem',
'connector.property-version': '1'}
assert properties == expected
def test_register_temporary_table(self):
self.env.set_parallelism(1)
source_path = os.path.join(self.tempdir + '/streaming.csv')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
data = [(1, "Hi", "Hello"), (2, "Hello", "Hello")]
self.prepare_csv_source(source_path, data, field_types, field_names)
sink_path = os.path.join(self.tempdir + '/streaming2.csv')
if os.path.isfile(sink_path):
os.remove(sink_path)
t_env = self.t_env
t_env.connect(FileSystem().path(source_path))\
.with_format(OldCsv()
.field_delimiter(',')
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())
.field("c", DataTypes.STRING()))\
.with_schema(Schema()
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())
.field("c", DataTypes.STRING()))\
.create_temporary_table("source")
t_env.connect(FileSystem().path(sink_path))\
.with_format(OldCsv()
.field_delimiter(',')
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())
.field("c", DataTypes.STRING()))\
.with_schema(Schema()
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())
.field("c", DataTypes.STRING()))\
.create_temporary_table("sink")
t_env.from_path("source").select("a + 1, b, c").execute_insert("sink").wait()
with open(sink_path, 'r') as f:
lines = f.read()
assert lines == '2,Hi,Hello\n' + "3,Hello,Hello\n"
class StreamTableDescriptorTests(PyFlinkStreamTableTestCase, AbstractTableDescriptorTests):
def test_in_append_mode(self):
descriptor = self.t_env.connect(FileSystem())
descriptor = descriptor\
.with_format(OldCsv())\
.in_append_mode()
properties = descriptor.to_properties()
expected = {'update-mode': 'append',
'format.type': 'csv',
'format.property-version': '1',
'connector.property-version': '1',
'connector.type': 'filesystem'}
assert properties == expected
def test_in_retract_mode(self):
descriptor = self.t_env.connect(FileSystem())
descriptor = descriptor \
.with_format(OldCsv()) \
.in_retract_mode()
properties = descriptor.to_properties()
expected = {'update-mode': 'retract',
'format.type': 'csv',
'format.property-version': '1',
'connector.property-version': '1',
'connector.type': 'filesystem'}
assert properties == expected
def test_in_upsert_mode(self):
descriptor = self.t_env.connect(FileSystem())
descriptor = descriptor \
.with_format(OldCsv()) \
.in_upsert_mode()
properties = descriptor.to_properties()
expected = {'update-mode': 'upsert',
'format.type': 'csv',
'format.property-version': '1',
'connector.property-version': '1',
'connector.type': 'filesystem'}
assert properties == expected
class BatchTableDescriptorTests(PyFlinkBatchTableTestCase, AbstractTableDescriptorTests):
pass
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Manage the Universal Terminology eXchange (UTX) format
UTX is a format for terminology exchange, designed, it seems, with Machine
Translation (MT) as its primary consumer. The format was created by
the Asia-Pacific Association for Machine Translation (AAMT).
It is a bilingual base class derived format with :class:`UtxFile`
and :class:`UtxUnit` providing file and unit level access.
The format can manage monolingual dictionaries but these classes don't
implement that.
Specification
The format is implemented according to UTX v1.0 (No longer available from
their website. The current `UTX version
<http://www.aamt.info/english/utx/#Download>`_ may be downloaded instead).
Format Implementation
The UTX format is a Tab Separated Value (TSV) file in UTF-8. The
first two lines are headers; each subsequent line contains a
single source-target definition.
Encoding
The files are UTF-8 encoded with no BOM and CR+LF line terminators.
"""
import csv
import sys
import time
from translate.storage import base
class UtxDialect(csv.Dialect):
"""Describe the properties of an UTX generated TAB-delimited dictionary
file."""
delimiter = "\t"
# The spec says \r\n but there are older versions < 1.0 with just \n
# FIXME if we find older specs then let's see if we can support these
# differences
lineterminator = "\r\n"
quoting = csv.QUOTE_NONE
csv.register_dialect("utx", UtxDialect)
class UtxHeader:
"""A UTX header entry
A UTX header is a single line that looks like this::
#UTX-S <version>; < source language >/< target language>;
<date created>; <optional fields (creator, license, etc.)>
Where::
- UTX-S version is currently 1.00.
- Source language/target language: ISO 639, 3166 formats.
In the case of a monolingual dictionary, the target language should be
omitted.
- Date created: ISO 8601 format
- Optional fields (creator, license, etc.)
"""
class UtxUnit(base.TranslationUnit):
"""A UTX dictionary unit"""
def __init__(self, source=None):
self._dict = {}
if source:
self.source = source
super(UtxUnit, self).__init__(source)
def getdict(self):
"""Get the dictionary of values for a UTX line"""
return self._dict
def setdict(self, newdict):
"""Set the dictionary of values for a UTX line
:param newdict: a new dictionary with UTX line elements
:type newdict: Dict
"""
# TODO First check that the values are OK
self._dict = newdict
dict = property(getdict, setdict)
def _get_field(self, key):
if key not in self._dict:
return None
elif self._dict[key]:
return self._dict[key].decode('utf-8')
else:
return ""
def _set_field(self, key, newvalue):
# FIXME update the header date
if newvalue is None:
self._dict[key] = None
if isinstance(newvalue, unicode):
newvalue = newvalue.encode('utf-8')
if key not in self._dict or newvalue != self._dict[key]:
self._dict[key] = newvalue
def getnotes(self, origin=None):
return self._get_field('comment')
def addnote(self, text, origin=None, position="append"):
currentnote = self._get_field('comment')
if position == "append" and currentnote is not None and currentnote != u'':
self._set_field('comment', currentnote + '\n' + text)
else:
self._set_field('comment', text)
def removenotes(self):
self._set_field('comment', u'')
def getsource(self):
return self._get_field('src')
def setsource(self, newsource):
self._rich_source = None
return self._set_field('src', newsource)
source = property(getsource, setsource)
def gettarget(self):
return self._get_field('tgt')
def settarget(self, newtarget):
self._rich_target = None
return self._set_field('tgt', newtarget)
target = property(gettarget, settarget)
def settargetlang(self, newlang):
self._dict['target-lang'] = newlang
targetlang = property(None, settargetlang)
def __str__(self):
return str(self._dict)
def istranslated(self):
return bool(self._dict.get('tgt', None))
class UtxFile(base.TranslationStore):
"""A UTX dictionary file"""
Name = _("UTX Dictionary")
Mimetypes = ["text/x-utx"]
Extensions = ["utx"]
def __init__(self, inputfile=None, unitclass=UtxUnit):
"""Construct an UTX dictionary, optionally reading in from
inputfile."""
self.UnitClass = unitclass
base.TranslationStore.__init__(self, unitclass=unitclass)
self.filename = ''
self.extension = ''
self._fieldnames = ['src', 'tgt', 'src:pos']
self._header = {"version": "1.00",
"source_language": "en",
"date_created": time.strftime("%FT%TZ%z", time.localtime(time.time()))}
if inputfile is not None:
self.parse(inputfile)
def _read_header(self, header=None):
"""Read a UTX header"""
if header is None:
self._fieldnames = ['src', 'tgt', 'src:pos']
# FIXME make the header properly
self._header = {"version": "1.00"}
return
header_lines = []
for line in header.split(UtxDialect.lineterminator):
if line.startswith("#"):
header_lines.append(line)
else:
break
self._header = {}
header_components = []
for line in header_lines[:-1]:
header_components += line[1:].split(";")
self._header["version"] = header_components[0].replace("UTX-S ", "")
languages = header_components[1].strip().split("/")
self._header["source_language"] = languages[0]
self._header["target_language"] = languages[1] or None
self._header["date_created"] = header_components[2].strip()
for data in header_components[3:]:
key, value = data.strip().split(":")
self._header[key] = value.strip()
self._fieldnames = header_lines[-1].replace("#", "").split('\t')
return len(header_lines)
def _write_header(self):
"""Create a UTX header"""
header = "#UTX-S %(version)s; %(src)s/%(tgt)s; %(date)s" % \
{"version": self._header["version"],
"src": self._header["source_language"],
"tgt": self._header.get("target_language", ""),
"date": self._header["date_created"],
}
items = []
for key, value in self._header.iteritems():
if key in ["version", "source_language", "target_language", "date_created"]:
continue
items.append("%s: %s" % (key, value))
if len(items):
items = "; ".join(items)
header += "; " + items
header += UtxDialect.lineterminator
header += "#" + "\t".join(self._fieldnames) + UtxDialect.lineterminator
return header
def getsourcelanguage(self):
return self._header.get("source_language", None)
def setsourcelanguage(self, sourcelanguage):
self._header["source_language"] = sourcelanguage
def gettargetlanguage(self):
return self._header.get("target_language", None)
def settargetlanguage(self, targetlanguage):
self._header["target_language"] = targetlanguage
def parse(self, input):
"""parsese the given file or file source string"""
if hasattr(input, 'name'):
self.filename = input.name
elif not getattr(self, 'filename', ''):
self.filename = ''
if hasattr(input, "read"):
tmsrc = input.read()
input.close()
input = tmsrc
try:
header_length = self._read_header(input)
except Exception:
raise base.ParseError("Cannot parse header")
lines = csv.DictReader(input.split(UtxDialect.lineterminator)[header_length:],
fieldnames=self._fieldnames,
dialect="utx")
for line in lines:
newunit = UtxUnit()
newunit.dict = line
self.addunit(newunit)
def __str__(self):
output = csv.StringIO()
writer = csv.DictWriter(output, fieldnames=self._fieldnames,
dialect="utx")
unit_count = 0
for unit in self.units:
if unit.istranslated():
unit_count += 1
writer.writerow(unit.dict)
if unit_count == 0:
return ""
output.reset()
return self._write_header() + "".join(output.readlines())
|
|
#!/usr/bin/env python
#
# mallard2man.py
#
# Copyright (C) 2014 MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
COPYRIGHT_HOLDER = "MongoDB, Inc."
GROUP = "MongoDB C Driver"
BUG_URL = 'https://jira.mongodb.org/browse/CDRIVER'
"""
This script is meant to convert a fairly basic Mallard format documentation
page to a groff-styled man page.
"""
import os
import re
import sys
import codecs
from datetime import datetime
from xml.etree import ElementTree
INCLUDE = '{http://www.w3.org/2001/XInclude}include'
TITLE = '{http://projectmallard.org/1.0/}title'
SUBTITLE = '{http://projectmallard.org/1.0/}subtitle'
SECTION = '{http://projectmallard.org/1.0/}section'
INFO = '{http://projectmallard.org/1.0/}info'
ITEM = '{http://projectmallard.org/1.0/}item'
LISTING = '{http://projectmallard.org/1.0/}listing'
LIST = '{http://projectmallard.org/1.0/}list'
LINK = '{http://projectmallard.org/1.0/}link'
LINKS = '{http://projectmallard.org/1.0/}links'
SYNOPSIS = '{http://projectmallard.org/1.0/}synopsis'
CODE = '{http://projectmallard.org/1.0/}code'
P = '{http://projectmallard.org/1.0/}p'
SCREEN = '{http://projectmallard.org/1.0/}screen'
EM = '{http://projectmallard.org/1.0/}em'
NOTE = '{http://projectmallard.org/1.0/}note'
TABLE = '{http://projectmallard.org/1.0/}table'
TR = '{http://projectmallard.org/1.0/}tr'
TD = '{http://projectmallard.org/1.0/}td'
OUTPUT = '{http://projectmallard.org/1.0/}output'
# Matches "\" and "-", but not "\-".
replaceables = re.compile(r'(\\(?!-))|((?<!\\)-)')
class Convert(object):
title = None
subtitle = None
sections = None
relpath = None
def __init__(self, inFile, outFile, section):
self.inFile = inFile
self.relpath = os.path.dirname(inFile)
self.outFile = outFile
self.section = section
self.sections = []
# Map: section id -> section element.
self.sections_map = {}
def _parse(self):
self.tree = ElementTree.ElementTree()
self.tree.parse(open(self.inFile))
self.root = self.tree.getroot()
# Python's standard ElementTree doesn't store an element's parent on
# the element. Make a child->parent map.
try:
iterator = self.tree.iter()
except AttributeError:
# Python 2.6.
iterator = self.tree.getiterator()
self.parent_map = dict((c, p) for p in iterator for c in p)
def _get_parent(self, ele):
return self.parent_map[ele]
def _extract(self):
# Extract the title and subtitle.
for child in self.root.getchildren():
if child.tag == TITLE:
# A title like "Version Checks" can't have spaces, otherwise
# the "whatis" entry can't be parsed from the man page title.
self.title = child.text.strip().replace(' ', '_')
elif child.tag == SUBTITLE:
self.subtitle = child.text.strip()
elif child.tag == SECTION:
if child.get('id'):
self.sections_map[child.get('id')] = child
self.sections.append(child)
if not self.subtitle and 'description' in self.sections_map:
# No "subtitle" element, use description section title as subtitle.
self.subtitle = self._section_text(self.sections_map['description'])
def _section_text(self, section):
# Find <section id="description"><p>some text</p></section>.
for child in section:
if child.tag != TITLE:
return self._textify_elem(child)
def _textify_elem(self, elem):
return ''.join(elem.itertext()).strip()
def _writeComment(self, text=''):
lines = text.split('\n')
for line in lines:
self.outFile.write('.\\" ')
self.outFile.write(line)
self.outFile.write('\n')
def _escape_char(self, match):
c = match.group(0)
if c == "-":
return r"\(hy"
elif c == "\\":
return "\\e"
assert False, "invalid char passed to _escape_char: %r" % c
def _escape(self, text):
# Avoid "hyphen-used-as-minus-sign" lintian warning about man pages,
# and escape text like "\0" as "\\0". We'll replace all "-" with "\(hy",
# which is an explicit hyphen, but leave alone the first line's
# "name \- description" text.
return replaceables.sub(self._escape_char, text)
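# Illustrative example of the escaping above: _escape(r"a-b \0 \- c") yields
# r"a\(hyb \e0 \- c". Plain hyphens become the explicit hyphen \(hy, lone
# backslashes become \e, and a literal "\-" (as used in the NAME line) is
# left untouched.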
def _write(self, text):
self._write_noescape(self._escape(text))
def _write_noescape(self, text):
self.outFile.write(text)
def _writeCommand(self, text):
self._write(text)
self._write('\n')
def _writeLine(self, text):
if text is not None:
text = text.strip()
if text.startswith('.'):
text = '\\&' + text
self._write(text)
self._write('\n')
def _generateHeader(self):
year = datetime.utcnow().year
self._writeComment('This manpage is Copyright (C) %s %s' % (year, COPYRIGHT_HOLDER))
self._writeComment('')
self._writeComment(
"Permission is granted to copy, distribute and/or modify this document\n"
"under the terms of the GNU Free Documentation License, Version 1.3\n"
"or any later version published by the Free Software Foundation;\n"
"with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.\n"
"A copy of the license is included in the section entitled \"GNU\n"
"Free Documentation License\".")
self._writeComment('')
date = datetime.fromtimestamp(int(os.stat(self.inFile).st_mtime)).strftime('%Y-%m-%d')
title = self.title.replace('()','').upper()
self._write('.TH "%s" "%s" "%s" "%s"\n' % (title, self.section, date, GROUP))
self._write('.SH NAME\n')
self._write_noescape('%s \\- %s\n' % (self.title, self.subtitle))
def _generateSection(self, section):
# Try to render the title first
for child in section.getchildren():
if child.tag == TITLE:
s = child.text.strip().upper()
self._writeCommand('.SH "%s"' % s)
for child in section.getchildren():
self._generateElement(child)
if child.tail:
self._writeLine(child.tail)
def _generateSynopsis(self, synopsis):
self._writeCommand('.nf')
for child in synopsis.getchildren():
self._generateElement(child)
if child.tail:
self._writeLine(child.tail)
self._writeCommand('.fi')
def _generateCode(self, code):
text = code.text
is_synopsis = self._get_parent(code).tag.endswith('synopsis')
if text and '\n' not in text and not is_synopsis:
text = text.replace('()', '(%s)' % self.section)
self._writeCommand('.B ' + text)
else:
self._writeCommand('.nf')
self._writeLine(code.text)
for child in code.getchildren():
self._generateElement(child)
self._writeCommand('.fi')
def _generateNote(self, note):
self._writeCommand('.B NOTE')
self._writeCommand('.RS')
for child in note.getchildren():
self._generateElement(child)
if child.tail:
self._writeLine(child.tail)
self._writeCommand('.RE')
def _generateP(self, p):
if p.text:
self._writeLine(p.text)
for child in p.getchildren():
self._generateElement(child)
if child.tail:
self._writeLine(child.tail)
def _generateScreen(self, screen):
for child in screen.getchildren():
self._generateElement(child)
def _generateListing(self, listing):
for child in listing.getchildren():
self._generateElement(child)
def _generateList(self, l):
for child in l.getchildren():
self._generateElement(child)
def _generateEM(self, em):
self._writeCommand('.B %s' % em.text)
def _generateOutput(self, output):
self._generateCode(output)
def _generateItem(self, item):
self._writeCommand('.IP \\[bu] 2')
for child in item.getchildren():
self._generateElement(child)
def _generateElement(self, ele):
if ele.tag == SECTION:
self._generateSection(ele)
elif ele.tag == SYNOPSIS:
self._generateSynopsis(ele)
elif ele.tag == CODE:
self._generateCode(ele)
elif ele.tag == OUTPUT:
self._generateOutput(ele)
elif ele.tag == P:
self._generateP(ele)
elif ele.tag == EM:
self._generateEM(ele)
elif ele.tag == LISTING:
self._generateListing(ele)
elif ele.tag == ITEM:
self._generateItem(ele)
elif ele.tag == LIST:
self._generateList(ele)
elif ele.tag == TITLE:
pass
elif ele.tag == SCREEN:
self._generateScreen(ele)
elif ele.tag == LINK:
self._generateLink(ele)
elif ele.tag == NOTE:
self._generateNote(ele)
elif ele.tag == TABLE:
self._generateTable(ele)
elif ele.tag == TR:
self._generateTr(ele)
elif ele.tag == TD:
self._generateTd(ele)
elif ele.tag == INCLUDE:
f = ele.attrib['href']
f = os.path.join(self.relpath, f)
d = codecs.open(f, 'r', encoding='utf-8').read()
self._writeLine(d)
else:
print('unknown element type %s' % ele)
def _generateTable(self, table):
for child in table.getchildren():
self._generateElement(child)
def _generateTr(self, tr):
self._writeCommand('.TP')
self._writeCommand('.B')
for child in tr.getchildren():
self._generateElement(child)
self._writeCommand('.LP')
def _generateTd(self, td):
for child in td.getchildren():
self._generateElement(child)
def _generateLink(self, link):
text = link.text
if text and '()' in text:
text = text.replace('()', '(%s)' % self.section)
if text:
self._writeCommand('.B ' + text)
def _generateSections(self):
for section in self.sections:
self._generateElement(section)
def _generateFooter(self):
self._write('\n.B')
self._write('\n.SH COLOPHON')
self._write('\nThis page is part of %s.' % GROUP)
self._write('\nPlease report any bugs at %s.' % BUG_URL.replace('-','\\-'))
def _generate(self):
self.realname = self.outFile
self.outFile = codecs.open(self.outFile + '.tmp', 'w', encoding='utf-8')
self._generateHeader()
self._generateSections()
self._generateFooter()
# Close (and flush) the temporary file before renaming it into place.
self.outFile.close()
os.rename(self.outFile.name, self.realname)
def convert(self):
self._parse()
self._extract()
self._generate()
def main(filenames, section='3'):
for inFile in filenames:
dirName = os.path.dirname(inFile) + '/man/'
baseName = os.path.basename(inFile)
baseFile = os.path.splitext(baseName)[0]
outFile = dirName + baseFile + '.' + section
c = Convert(inFile, outFile, section)
c.convert()
if __name__ == '__main__':
if len(sys.argv) < 3:
print('usage: %s SECTION FILENAMES...' % sys.argv[0])
sys.exit(1)
section = sys.argv[1]
main(sys.argv[2:], section)
sys.exit(0)
|
|
"""Tests for seaborn utility functions."""
import tempfile
from urllib.request import urlopen
from http.client import HTTPException
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from cycler import cycler
import pytest
from numpy.testing import (
assert_array_equal,
)
from pandas.testing import (
assert_series_equal,
assert_frame_equal,
)
from distutils.version import LooseVersion
from .. import utils, rcmod
from ..utils import (
get_dataset_names,
get_color_cycle,
remove_na,
load_dataset,
_assign_default_kwargs,
_draw_figure,
_deprecate_ci,
)
a_norm = np.random.randn(100)
def _network(t=None, url="https://github.com"):
"""
Decorator that will skip a test if `url` is unreachable.
Parameters
----------
t : function, optional
url : str, optional
"""
if t is None:
return lambda x: _network(x, url=url)
def wrapper(*args, **kwargs):
# attempt to connect
try:
f = urlopen(url)
except (IOError, HTTPException):
pytest.skip("No internet connection")
else:
f.close()
return t(*args, **kwargs)
return wrapper
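# Usage sketch, mirroring the decorated tests further down in this module
# (the test name here is hypothetical):
#
#   @_network(url="https://github.com")
#   def test_requires_network():
#       ...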
def test_ci_to_errsize():
"""Test behavior of ci_to_errsize."""
cis = [[.5, .5],
[1.25, 1.5]]
heights = [1, 1.5]
actual_errsize = np.array([[.5, 1],
[.25, 0]])
test_errsize = utils.ci_to_errsize(cis, heights)
assert_array_equal(actual_errsize, test_errsize)
def test_desaturate():
"""Test color desaturation."""
out1 = utils.desaturate("red", .5)
assert out1 == (.75, .25, .25)
out2 = utils.desaturate("#00FF00", .5)
assert out2 == (.25, .75, .25)
out3 = utils.desaturate((0, 0, 1), .5)
assert out3 == (.25, .25, .75)
out4 = utils.desaturate("red", .5)
assert out4 == (.75, .25, .25)
def test_desaturation_prop():
"""Test that pct outside of [0, 1] raises exception."""
with pytest.raises(ValueError):
utils.desaturate("blue", 50)
def test_saturate():
"""Test performance of saturation function."""
out = utils.saturate((.75, .25, .25))
assert out == (1, 0, 0)
@pytest.mark.parametrize(
"s,exp",
[
("a", "a"),
("abc", "abc"),
(b"a", "a"),
(b"abc", "abc"),
(bytearray("abc", "utf-8"), "abc"),
(bytearray(), ""),
(1, "1"),
(0, "0"),
([], str([])),
],
)
def test_to_utf8(s, exp):
"""Test the to_utf8 function: object to string"""
u = utils.to_utf8(s)
assert type(u) == str
assert u == exp
class TestSpineUtils(object):
sides = ["left", "right", "bottom", "top"]
outer_sides = ["top", "right"]
inner_sides = ["left", "bottom"]
offset = 10
original_position = ("outward", 0)
offset_position = ("outward", offset)
def test_despine(self):
f, ax = plt.subplots()
for side in self.sides:
assert ax.spines[side].get_visible()
utils.despine()
for side in self.outer_sides:
assert not ax.spines[side].get_visible()
for side in self.inner_sides:
assert ax.spines[side].get_visible()
utils.despine(**dict(zip(self.sides, [True] * 4)))
for side in self.sides:
assert not ax.spines[side].get_visible()
def test_despine_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(ax=ax2)
for side in self.sides:
assert ax1.spines[side].get_visible()
for side in self.outer_sides:
assert not ax2.spines[side].get_visible()
for side in self.inner_sides:
assert ax2.spines[side].get_visible()
def test_despine_with_offset(self):
f, ax = plt.subplots()
for side in self.sides:
pos = ax.spines[side].get_position()
assert pos == self.original_position
utils.despine(ax=ax, offset=self.offset)
for side in self.sides:
is_visible = ax.spines[side].get_visible()
new_position = ax.spines[side].get_position()
if is_visible:
assert new_position == self.offset_position
else:
assert new_position == self.original_position
def test_despine_side_specific_offset(self):
f, ax = plt.subplots()
utils.despine(ax=ax, offset=dict(left=self.offset))
for side in self.sides:
is_visible = ax.spines[side].get_visible()
new_position = ax.spines[side].get_position()
if is_visible and side == "left":
assert new_position == self.offset_position
else:
assert new_position == self.original_position
def test_despine_with_offset_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(offset=self.offset, ax=ax2)
for side in self.sides:
pos1 = ax1.spines[side].get_position()
pos2 = ax2.spines[side].get_position()
assert pos1 == self.original_position
if ax2.spines[side].get_visible():
assert pos2 == self.offset_position
else:
assert pos2 == self.original_position
def test_despine_trim_spines(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_xlim(.75, 3.25)
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
assert bounds == (1, 3)
def test_despine_trim_inverted(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_ylim(.85, 3.15)
ax.invert_yaxis()
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
assert bounds == (1, 3)
def test_despine_trim_noticks(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_yticks([])
utils.despine(trim=True)
assert ax.get_yticks().size == 0
def test_despine_trim_categorical(self):
f, ax = plt.subplots()
ax.plot(["a", "b", "c"], [1, 2, 3])
utils.despine(trim=True)
bounds = ax.spines["left"].get_bounds()
assert bounds == (1, 3)
bounds = ax.spines["bottom"].get_bounds()
assert bounds == (0, 2)
def test_despine_moved_ticks(self):
f, ax = plt.subplots()
for t in ax.yaxis.majorTicks:
t.tick1line.set_visible(True)
utils.despine(ax=ax, left=True, right=False)
for t in ax.yaxis.majorTicks:
assert t.tick2line.get_visible()
plt.close(f)
f, ax = plt.subplots()
for t in ax.yaxis.majorTicks:
t.tick1line.set_visible(False)
utils.despine(ax=ax, left=True, right=False)
for t in ax.yaxis.majorTicks:
assert not t.tick2line.get_visible()
plt.close(f)
f, ax = plt.subplots()
for t in ax.xaxis.majorTicks:
t.tick1line.set_visible(True)
utils.despine(ax=ax, bottom=True, top=False)
for t in ax.xaxis.majorTicks:
assert t.tick2line.get_visible()
plt.close(f)
f, ax = plt.subplots()
for t in ax.xaxis.majorTicks:
t.tick1line.set_visible(False)
utils.despine(ax=ax, bottom=True, top=False)
for t in ax.xaxis.majorTicks:
assert not t.tick2line.get_visible()
plt.close(f)
def test_ticklabels_overlap():
rcmod.set()
f, ax = plt.subplots(figsize=(2, 2))
f.tight_layout() # This gets the Agg renderer working
assert not utils.axis_ticklabels_overlap(ax.get_xticklabels())
big_strings = "abcdefgh", "ijklmnop"
ax.set_xlim(-.5, 1.5)
ax.set_xticks([0, 1])
ax.set_xticklabels(big_strings)
assert utils.axis_ticklabels_overlap(ax.get_xticklabels())
x, y = utils.axes_ticklabels_overlap(ax)
assert x
assert not y
def test_locator_to_legend_entries():
locator = mpl.ticker.MaxNLocator(nbins=3)
limits = (0.09, 0.4)
levels, str_levels = utils.locator_to_legend_entries(
locator, limits, float
)
assert str_levels == ["0.15", "0.30"]
limits = (0.8, 0.9)
levels, str_levels = utils.locator_to_legend_entries(
locator, limits, float
)
assert str_levels == ["0.80", "0.84", "0.88"]
limits = (1, 6)
levels, str_levels = utils.locator_to_legend_entries(locator, limits, int)
assert str_levels == ["2", "4", "6"]
locator = mpl.ticker.LogLocator(numticks=5)
limits = (5, 1425)
levels, str_levels = utils.locator_to_legend_entries(locator, limits, int)
if LooseVersion(mpl.__version__) >= "3.1":
assert str_levels == ['10', '100', '1000']
limits = (0.00003, 0.02)
levels, str_levels = utils.locator_to_legend_entries(
locator, limits, float
)
if LooseVersion(mpl.__version__) >= "3.1":
assert str_levels == ['1e-04', '1e-03', '1e-02']
def check_load_dataset(name):
ds = load_dataset(name, cache=False)
assert isinstance(ds, pd.DataFrame)
def check_load_cached_dataset(name):
# Test the caching using a temporary file.
with tempfile.TemporaryDirectory() as tmpdir:
# download and cache
ds = load_dataset(name, cache=True, data_home=tmpdir)
# use cached version
ds2 = load_dataset(name, cache=True, data_home=tmpdir)
assert_frame_equal(ds, ds2)
@_network(url="https://github.com/mwaskom/seaborn-data")
def test_get_dataset_names():
names = get_dataset_names()
assert names
assert "tips" in names
@_network(url="https://github.com/mwaskom/seaborn-data")
def test_load_datasets():
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not take effect, so we need to call it explicitly
# yield check_load_dataset, name
check_load_dataset(name)
@_network(url="https://github.com/mwaskom/seaborn-data")
def test_load_dataset_error():
name = "bad_name"
err = f"'{name}' is not one of the example datasets."
with pytest.raises(ValueError, match=err):
load_dataset(name)
@_network(url="https://github.com/mwaskom/seaborn-data")
def test_load_cached_datasets():
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not take effect, so we need to call it explicitly
# yield check_load_dataset, name
check_load_cached_dataset(name)
def test_relative_luminance():
"""Test relative luminance."""
out1 = utils.relative_luminance("white")
assert out1 == 1
out2 = utils.relative_luminance("#000000")
assert out2 == 0
out3 = utils.relative_luminance((.25, .5, .75))
assert out3 == pytest.approx(0.201624536)
rgbs = mpl.cm.RdBu(np.linspace(0, 1, 10))
lums1 = [utils.relative_luminance(rgb) for rgb in rgbs]
lums2 = utils.relative_luminance(rgbs)
for lum1, lum2 in zip(lums1, lums2):
assert lum1 == pytest.approx(lum2)
@pytest.mark.parametrize(
"cycler,result",
[
(cycler(color=["y"]), ["y"]),
(cycler(color=["k"]), ["k"]),
(cycler(color=["k", "y"]), ["k", "y"]),
(cycler(color=["y", "k"]), ["y", "k"]),
(cycler(color=["b", "r"]), ["b", "r"]),
(cycler(color=["r", "b"]), ["r", "b"]),
(cycler(lw=[1, 2]), [".15"]), # no color in cycle
],
)
def test_get_color_cycle(cycler, result):
with mpl.rc_context(rc={"axes.prop_cycle": cycler}):
assert get_color_cycle() == result
def test_remove_na():
a_array = np.array([1, 2, np.nan, 3])
a_array_rm = remove_na(a_array)
assert_array_equal(a_array_rm, np.array([1, 2, 3]))
a_series = pd.Series([1, 2, np.nan, 3])
a_series_rm = remove_na(a_series)
assert_series_equal(a_series_rm, pd.Series([1., 2, 3], [0, 1, 3]))
def test_assign_default_kwargs():
def f(a, b, c, d):
pass
def g(c=1, d=2):
pass
kws = {"c": 3}
kws = _assign_default_kwargs(kws, f, g)
assert kws == {"c": 3, "d": 2}
def test_draw_figure():
f, ax = plt.subplots()
ax.plot(["a", "b", "c"], [1, 2, 3])
_draw_figure(f)
assert not f.stale
# ticklabels are not populated until a draw, but this may change
assert ax.get_xticklabels()[0].get_text() == "a"
def test_deprecate_ci():
msg = "The `ci` parameter is deprecated; use `errorbar="
with pytest.warns(UserWarning, match=msg + "None"):
out = _deprecate_ci(None, None)
assert out is None
with pytest.warns(UserWarning, match=msg + "'sd'"):
out = _deprecate_ci(None, "sd")
assert out == "sd"
with pytest.warns(UserWarning, match=msg + r"\('ci', 68\)"):
out = _deprecate_ci(None, 68)
assert out == ("ci", 68)
|
|
# -*- coding: utf-8 -*-
import json
from collections import OrderedDict
from datetime import datetime, timedelta
from typing import Any, Dict, Mapping
from mock import patch
from eduid_common.api.testing import EduidAPITestCase
from eduid_webapp.lookup_mobile_proofing.app import MobileProofingApp, init_lookup_mobile_proofing_app
from eduid_webapp.lookup_mobile_proofing.helpers import MobileMsg
from eduid_webapp.lookup_mobile_proofing.lookup_mobile_relay import LookupMobileTaskFailed
__author__ = 'lundberg'
class LookupMobileProofingTests(EduidAPITestCase):
"""Base TestCase for those tests that need a full environment setup"""
app: MobileProofingApp
def setUp(self):
self.test_user_eppn = 'hubba-baar'
self.test_user_nin = '199001023456'
fifteen_years_ago = datetime.now() - timedelta(days=15 * 365)
self.test_user_nin_underage = '{}01023456'.format(fifteen_years_ago.year)
self.mock_address = OrderedDict(
[
(
u'Name',
OrderedDict(
[(u'GivenNameMarking', u'20'), (u'GivenName', u'Testaren Test'), (u'Surname', u'Testsson')]
),
),
(
u'OfficialAddress',
OrderedDict(
[(u'Address2', u'\xd6RGATAN 79 LGH 10'), (u'PostalCode', u'12345'), (u'City', u'LANDET')]
),
),
]
)
super(LookupMobileProofingTests, self).setUp(users=['hubba-baar'])
def load_app(self, config: Mapping[str, Any]):
"""
Called from the parent class, so we can provide the appropriate flask
app for this test case.
"""
return init_lookup_mobile_proofing_app('testing', config)
def update_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
config.update(
{
'msg_broker_url': 'amqp://dummy',
'am_broker_url': 'amqp://dummy',
'lookup_mobile_broker_url': 'amqp://dummy',
'celery': {'result_backend': 'amqp', 'task_serializer': 'json'},
'environment': 'dev',
'magic_cookie': '',
'magic_cookie_name': '',
},
)
return config
def test_authenticate(self):
response = self.browser.get('/proofing')
self.assertEqual(response.status_code, 302) # Redirect to token service
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
response = browser.get('/proofing')
self.assertEqual(response.status_code, 200) # Authenticated request
@patch('eduid_webapp.lookup_mobile_proofing.lookup_mobile_relay.LookupMobileRelay.find_nin_by_mobile')
@patch('eduid_common.api.msg.MsgRelay.get_postal_address')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def test_proofing_flow(self, mock_request_user_sync, mock_get_postal_address, mock_find_nin_by_mobile):
mock_find_nin_by_mobile.return_value = self.test_user_nin
mock_get_postal_address.return_value = self.mock_address
mock_request_user_sync.side_effect = self.request_user_sync
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
response = json.loads(browser.get('/proofing').data)
self.assertEqual(response['type'], 'GET_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
csrf_token = response['payload']['csrf_token']
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
data = {'nin': self.test_user_nin, 'csrf_token': csrf_token}
response = browser.post('/proofing', data=json.dumps(data), content_type=self.content_type_json)
response = json.loads(response.data)
self.assertEqual(response['type'], 'POST_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
self.assertEqual(response['payload']['success'], True)
user = self.app.private_userdb.get_user_by_eppn(self.test_user_eppn)
self.assertEqual(user.nins.primary.number, self.test_user_nin)
self.assertEqual(user.nins.primary.created_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.primary.verified_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.primary.is_verified, True)
self.assertEqual(self.app.proofing_log.db_count(), 1)
@patch('eduid_webapp.lookup_mobile_proofing.lookup_mobile_relay.LookupMobileRelay.find_nin_by_mobile')
@patch('eduid_common.api.msg.MsgRelay.get_postal_address')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def test_proofing_flow_underage(self, mock_request_user_sync, mock_get_postal_address, mock_find_nin_by_mobile):
mock_find_nin_by_mobile.return_value = self.test_user_nin_underage
mock_get_postal_address.return_value = self.mock_address
mock_request_user_sync.side_effect = self.request_user_sync
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
response = json.loads(browser.get('/proofing').data)
self.assertEqual(response['type'], 'GET_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
csrf_token = response['payload']['csrf_token']
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
data = {'nin': self.test_user_nin_underage, 'csrf_token': csrf_token}
response = browser.post('/proofing', data=json.dumps(data), content_type=self.content_type_json)
response = json.loads(response.data)
self.assertEqual(response['type'], 'POST_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
self.assertEqual(response['payload']['success'], True)
user = self.app.private_userdb.get_user_by_eppn(self.test_user_eppn)
self.assertEqual(user.nins.primary.number, self.test_user_nin_underage)
self.assertEqual(user.nins.primary.created_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.primary.verified_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.primary.is_verified, True)
self.assertEqual(self.app.proofing_log.db_count(), 1)
@patch('eduid_webapp.lookup_mobile_proofing.lookup_mobile_relay.LookupMobileRelay.find_nin_by_mobile')
@patch('eduid_common.api.msg.MsgRelay.get_postal_address')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def test_proofing_flow_no_match(self, mock_request_user_sync, mock_get_postal_address, mock_find_nin_by_mobile):
mock_find_nin_by_mobile.return_value = None
mock_get_postal_address.return_value = self.mock_address
mock_request_user_sync.side_effect = self.request_user_sync
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
response = json.loads(browser.get('/proofing').data)
self.assertEqual(response['type'], 'GET_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
csrf_token = response['payload']['csrf_token']
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
data = {'nin': self.test_user_nin, 'csrf_token': csrf_token}
response = browser.post('/proofing', data=json.dumps(data), content_type=self.content_type_json)
response = json.loads(response.data)
self.assertEqual(response['type'], 'POST_LOOKUP_MOBILE_PROOFING_PROOFING_FAIL')
user = self.app.private_userdb.get_user_by_eppn(self.test_user_eppn)
self.assertEqual(user.nins.count, 1)
self.assertEqual(user.nins.find(self.test_user_nin).created_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.find(self.test_user_nin).is_verified, False)
self.assertEqual(self.app.proofing_log.db_count(), 0)
@patch('eduid_webapp.lookup_mobile_proofing.lookup_mobile_relay.LookupMobileRelay.find_nin_by_mobile')
@patch('eduid_common.api.msg.MsgRelay.get_postal_address')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def test_proofing_flow_LookupMobileTaskFailed(
self, mock_request_user_sync, mock_get_postal_address, mock_find_nin_by_mobile
):
mock_find_nin_by_mobile.side_effect = LookupMobileTaskFailed('Test Exception')
mock_get_postal_address.return_value = self.mock_address
mock_request_user_sync.side_effect = self.request_user_sync
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
response = json.loads(browser.get('/proofing').data)
self.assertEqual(response['type'], 'GET_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
csrf_token = response['payload']['csrf_token']
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
data = {'nin': self.test_user_nin, 'csrf_token': csrf_token}
response = browser.post('/proofing', data=json.dumps(data), content_type=self.content_type_json)
response = json.loads(response.data)
self.assertEqual('POST_LOOKUP_MOBILE_PROOFING_PROOFING_FAIL', response['type'])
self.assertEqual(MobileMsg.lookup_error.value, response['payload']['message'])
user = self.app.private_userdb.get_user_by_eppn(self.test_user_eppn)
self.assertEqual(user.nins.count, 1)
self.assertEqual(user.nins.find(self.test_user_nin).created_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.find(self.test_user_nin).is_verified, False)
self.assertEqual(self.app.proofing_log.db_count(), 0)
@patch('eduid_webapp.lookup_mobile_proofing.lookup_mobile_relay.LookupMobileRelay.find_nin_by_mobile')
@patch('eduid_common.api.msg.MsgRelay.get_postal_address')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def test_proofing_flow_no_match_backdoor(
self, mock_request_user_sync, mock_get_postal_address, mock_find_nin_by_mobile
):
mock_find_nin_by_mobile.return_value = None
mock_get_postal_address.return_value = None
mock_request_user_sync.side_effect = self.request_user_sync
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = 'magic-cookie'
user = self.app.central_userdb.get_user_by_eppn(self.test_user_eppn)
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
response = json.loads(browser.get('/proofing').data)
self.assertEqual(response['type'], 'GET_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
csrf_token = response['payload']['csrf_token']
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
browser.set_cookie('localhost', key='magic-cookie', value='magic-cookie')
data = {'nin': self.test_user_nin, 'csrf_token': csrf_token}
response = browser.post('/proofing', data=json.dumps(data), content_type=self.content_type_json)
response = json.loads(response.data)
self.assertEqual(response['type'], 'POST_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
self.assertEqual(response['payload']['success'], True)
user = self.app.private_userdb.get_user_by_eppn(self.test_user_eppn)
self.assertEqual(user.nins.primary.number, self.test_user_nin)
self.assertEqual(user.nins.primary.created_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.primary.verified_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.primary.is_verified, True)
self.assertEqual(self.app.proofing_log.db_count(), 1)
@patch('eduid_webapp.lookup_mobile_proofing.lookup_mobile_relay.LookupMobileRelay.find_nin_by_mobile')
@patch('eduid_common.api.msg.MsgRelay.get_postal_address')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def test_proofing_flow_no_match_backdoor_code_in_pro(
self, mock_request_user_sync, mock_get_postal_address, mock_find_nin_by_mobile
):
mock_find_nin_by_mobile.return_value = None
mock_get_postal_address.return_value = None
mock_request_user_sync.side_effect = self.request_user_sync
self.app.conf.environment = 'pro'
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = 'magic-cookie'
user = self.app.central_userdb.get_user_by_eppn(self.test_user_eppn)
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
response = json.loads(browser.get('/proofing').data)
self.assertEqual(response['type'], 'GET_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
csrf_token = response['payload']['csrf_token']
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
browser.set_cookie('localhost', key='magic-cookie', value='magic-cookie')
data = {'nin': self.test_user_nin, 'csrf_token': csrf_token}
response = browser.post('/proofing', data=json.dumps(data), content_type=self.content_type_json)
response = json.loads(response.data)
self.assertEqual(response['type'], 'POST_LOOKUP_MOBILE_PROOFING_PROOFING_FAIL')
user = self.app.private_userdb.get_user_by_eppn(self.test_user_eppn)
self.assertEqual(user.nins.count, 1)
self.assertEqual(user.nins.find(self.test_user_nin).created_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.find(self.test_user_nin).is_verified, False)
self.assertEqual(self.app.proofing_log.db_count(), 0)
@patch('eduid_webapp.lookup_mobile_proofing.lookup_mobile_relay.LookupMobileRelay.find_nin_by_mobile')
@patch('eduid_common.api.msg.MsgRelay.get_postal_address')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def test_proofing_flow_no_match_backdoor_code_unconfigured(
self, mock_request_user_sync, mock_get_postal_address, mock_find_nin_by_mobile
):
mock_find_nin_by_mobile.return_value = None
mock_get_postal_address.return_value = None
mock_request_user_sync.side_effect = self.request_user_sync
self.app.conf.magic_cookie = ''
self.app.conf.magic_cookie_name = 'magic-cookie'
user = self.app.central_userdb.get_user_by_eppn(self.test_user_eppn)
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
response = json.loads(browser.get('/proofing').data)
self.assertEqual(response['type'], 'GET_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
csrf_token = response['payload']['csrf_token']
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
browser.set_cookie('localhost', key='magic-cookie', value='magic-cookie')
data = {'nin': self.test_user_nin, 'csrf_token': csrf_token}
response = browser.post('/proofing', data=json.dumps(data), content_type=self.content_type_json)
response = json.loads(response.data)
self.assertEqual(response['type'], 'POST_LOOKUP_MOBILE_PROOFING_PROOFING_FAIL')
user = self.app.private_userdb.get_user_by_eppn(self.test_user_eppn)
self.assertEqual(user.nins.count, 1)
self.assertEqual(user.nins.find(self.test_user_nin).created_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.find(self.test_user_nin).is_verified, False)
self.assertEqual(self.app.proofing_log.db_count(), 0)
@patch('eduid_common.api.msg.MsgRelay.get_relations_to')
@patch('eduid_webapp.lookup_mobile_proofing.lookup_mobile_relay.LookupMobileRelay.find_nin_by_mobile')
@patch('eduid_common.api.msg.MsgRelay.get_postal_address')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def test_proofing_flow_relation(
self, mock_request_user_sync, mock_get_postal_address, mock_find_nin_by_mobile, mock_get_relations_to
):
mock_get_relations_to.return_value = ['MO']
mock_find_nin_by_mobile.return_value = '197001021234'
mock_get_postal_address.return_value = self.mock_address
mock_request_user_sync.side_effect = self.request_user_sync
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
response = json.loads(browser.get('/proofing').data)
self.assertEqual(response['type'], 'GET_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
csrf_token = response['payload']['csrf_token']
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
data = {'nin': self.test_user_nin_underage, 'csrf_token': csrf_token}
response = browser.post('/proofing', data=json.dumps(data), content_type=self.content_type_json)
response = json.loads(response.data)
self.assertEqual(response['type'], 'POST_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
self.assertEqual(response['payload']['success'], True)
user = self.app.private_userdb.get_user_by_eppn(self.test_user_eppn)
self.assertEqual(user.nins.primary.number, self.test_user_nin_underage)
self.assertEqual(user.nins.primary.created_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.primary.verified_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.primary.is_verified, True)
self.assertEqual(self.app.proofing_log.db_count(), 1)
@patch('eduid_common.api.msg.MsgRelay.get_relations_to')
@patch('eduid_webapp.lookup_mobile_proofing.lookup_mobile_relay.LookupMobileRelay.find_nin_by_mobile')
@patch('eduid_common.api.msg.MsgRelay.get_postal_address')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def test_proofing_flow_relation_no_match(
self, mock_request_user_sync, mock_get_postal_address, mock_find_nin_by_mobile, mock_get_relations_to
):
mock_get_relations_to.return_value = []
mock_find_nin_by_mobile.return_value = '197001021234'
mock_get_postal_address.return_value = self.mock_address
mock_request_user_sync.side_effect = self.request_user_sync
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
response = json.loads(browser.get('/proofing').data)
self.assertEqual(response['type'], 'GET_LOOKUP_MOBILE_PROOFING_PROOFING_SUCCESS')
csrf_token = response['payload']['csrf_token']
with self.session_cookie(self.browser, self.test_user_eppn) as browser:
data = {'nin': self.test_user_nin_underage, 'csrf_token': csrf_token}
response = browser.post('/proofing', data=json.dumps(data), content_type=self.content_type_json)
response = json.loads(response.data)
self.assertEqual(response['type'], 'POST_LOOKUP_MOBILE_PROOFING_PROOFING_FAIL')
user = self.app.private_userdb.get_user_by_eppn(self.test_user_eppn)
self.assertEqual(user.nins.count, 1)
self.assertEqual(user.nins.find(self.test_user_nin_underage).created_by, 'lookup_mobile_proofing')
self.assertEqual(user.nins.find(self.test_user_nin_underage).is_verified, False)
self.assertEqual(self.app.proofing_log.db_count(), 0)
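# The backdoor tests above rely on a test-only shortcut: when a magic cookie is
# configured and the deployment is not production, presenting that cookie lets the
# proofing succeed even without a real mobile-number match. Below is a minimal
# sketch of that gate reconstructed from the tests; `check_magic_cookie` and its
# argument names are hypothetical and not the eduid-common implementation.
def check_magic_cookie(config, cookies):
    """Return True only if the configured magic cookie is present and allowed."""
    # The backdoor must be explicitly configured (an empty magic_cookie disables it) ...
    if not config.magic_cookie or not config.magic_cookie_name:
        return False
    # ... and it is never honoured in a production environment.
    if getattr(config, 'environment', None) == 'pro':
        return False
    # The request must carry the configured cookie name with the configured value.
    return cookies.get(config.magic_cookie_name) == config.magic_cookie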
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to distributed training.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.distribute import distributed_training_utils as dist_utils
from tensorflow.python.keras.engine import partial_batch_padding_handler as padding_util
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import tf_logging as logging
def _per_replica_execution_function(model, mode):
exec_func = model._make_execution_function(mode)
return (exec_func.inputs, exec_func.outputs, exec_func.updates_op,
exec_func.session_kwargs)
def _build_model(strategy, model, mode, inputs, targets=None):
if model._compile_distribution:
dist_utils.clone_model_on_replicas(
model, strategy, mode, inputs=inputs, targets=targets)
else:
dist_utils._build_distributed_network(model, strategy, mode, inputs,
targets)
def _make_train_step_fn(model, mode, strategy, output_labels):
"""Create step fn.
Arguments:
model: a Keras Model instance.
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
strategy: a `tf.distribute.Strategy` instance.
output_labels: the output labels for the step function.
Returns:
A step function to run by `tf.distribute.Strategy`.
"""
def _step_fn(ctx, inputs):
"""A step fn that returns update ops."""
if isinstance(inputs, (tuple, list)) and len(inputs) == 2:
inputs, targets = inputs
else:
targets = None
# When the input features are a dictionary of tensors, the dictionary is
# flattened to an array and passed as a model input. This results in an input
# mismatch when the model's input layer names are not sorted in alphabetical
# order, since `nest.flatten()` sorts dictionary elements by key. To avoid
# this, turn the inputs into a list ordered along `model._feed_input_names`.
if isinstance(inputs, dict):
inputs = [inputs[input_name] for input_name in model._feed_input_names]
_build_model(strategy, model, mode, inputs, targets)
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = strategy.extended.call_for_each_replica(
_per_replica_execution_function,
args=(dist_utils.get_distributed_model(model, mode), mode))
(all_inputs, all_outputs, all_updates,
all_session_args) = dist_utils.unwrap_values(strategy, grouped_inputs,
grouped_outputs,
grouped_updates,
grouped_session_args)
combined_fn = K.function(
all_inputs,
all_outputs,
updates=all_updates,
name='distributed_' + str(mode) + '_function',
**all_session_args)
for label, output in zip(output_labels, combined_fn.outputs):
if label == 'loss':
reduce_op = ds_reduce_util.ReduceOp.SUM
else:
# We reduce all other metrics using mean for now. This is a temporary
# workaround until new metrics are in place.
reduce_op = ds_reduce_util.ReduceOp.MEAN
ctx.set_last_step_output(label, output, reduce_op)
# TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
# feed_dict, session kwargs, run options, run_metadata for now. These should
# be handled appropriately
return combined_fn.updates_op
return _step_fn
def experimental_tpu_fit_loop(model,
dataset,
epochs=100,
verbose=1,
callbacks=None,
initial_epoch=0,
steps_per_epoch=None,
val_dataset=None,
validation_steps=None,
validation_freq=1):
"""Fit loop for training with TPU tf.distribute.Strategy.
Arguments:
model: Keras Model instance.
dataset: Dataset that returns inputs and targets
epochs: Number of times to iterate over the data
verbose: Integer, Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
val_dataset: Dataset for validation data.
validation_steps: Number of steps to run validation for
(only if doing validation from data tensors).
Ignored with the default value of `None`.
validation_freq: Only relevant if validation data is provided. Integer or
`collections.abc.Container` instance (e.g. list, tuple, etc.). If an
integer, specifies how many training epochs to run before a new
validation run is performed, e.g. `validation_freq=2` runs
validation every 2 epochs. If a Container, specifies the epochs on
which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
Returns:
Returns `None`.
Raises:
ValueError: in case of invalid arguments.
"""
mode = ModeKeys.TRAIN
current_strategy = model._distribution_strategy
iteration_value = min(steps_per_epoch,
current_strategy.extended.steps_per_run)
steps_per_run = K.variable(
value=iteration_value,
dtype='int32',
name='steps_per_run')
# TODO(fchollet): add support for `steps_per_epoch=None` in TPU loops.
iterator = dist_utils.get_iterator(dataset, current_strategy)
scope = dist_utils.distributed_scope(
strategy=current_strategy, learning_phase=1)
scope.__enter__()
out_labels = model.metrics_names or []
step_fn = _make_train_step_fn(model, ModeKeys.TRAIN, current_strategy,
out_labels)
# Add initial dummy values for loss and other metric tensors.
initial_loop_values = {}
initial_loop_values['loss'] = constant_op.constant(1e7)
for m in model._get_training_eval_metrics():
tensor = m.result()
initial_loop_values[m.name] = array_ops.zeros(tensor.shape, tensor.dtype)
ctx = current_strategy.extended.experimental_run_steps_on_iterator(
step_fn, iterator, iterations=steps_per_run,
initial_loop_values=initial_loop_values)
train_op = ctx.run_op
output_tensors = ctx.last_step_outputs
do_validation = bool(validation_steps)
if model._compile_distribution:
dist_utils._copy_weights_to_distributed_model(model, mode)
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=do_validation,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
verbose=verbose,
count_mode='steps',
mode=mode)
# Calculate the steps each time on the device.
steps_to_run = ([current_strategy.extended.steps_per_run] *
(steps_per_epoch //
current_strategy.extended.steps_per_run))
if steps_per_epoch % current_strategy.extended.steps_per_run:
steps_to_run.append(
steps_per_epoch % current_strategy.extended.steps_per_run)
target_steps = len(steps_to_run)
callbacks._call_begin_hook(mode)
initial_epoch = model._maybe_load_initial_epoch_from_ckpt(initial_epoch, mode)
for epoch in range(initial_epoch, epochs):
dist_utils._reset_metrics(model)
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
step_index = 0
prev_step_count = None
current_step = 0
while current_step < target_steps:
step_count = steps_to_run[current_step]
batch_logs = {'batch': step_index, 'size': 1, 'num_steps': step_count}
callbacks._call_batch_hook(mode, 'begin', step_index, batch_logs)
if prev_step_count is None or step_count != prev_step_count:
K.get_session().run(steps_per_run.assign(step_count))
prev_step_count = step_count
try:
_, outputs = K.batch_get_value([train_op, output_tensors])
except errors.OutOfRangeError:
logging.warning('Your dataset iterator ran out of data; '
'interrupting training. Make sure that your dataset '
'can generate at least `steps_per_epoch * epochs` '
'batches (in this case, %d batches).' %
(steps_per_epoch * epochs))
break
batch_logs.update(outputs)
callbacks._call_batch_hook(mode, 'end', step_index, batch_logs)
step_index = step_index + step_count
current_step += 1
if callbacks.model.stop_training:
break
if (do_validation and
training_utils.should_run_validation(validation_freq, epoch)):
logging.info('Running validation at fit epoch: %s', epoch)
if model._compile_distribution:
# Since we create a new clone from the original model we need to copy
# the weights back to the original model before we can run validation.
dist_utils._copy_weights_to_original_model(model, ModeKeys.TRAIN)
val_outs = experimental_tpu_test_loop( # pylint: disable=undefined-variable
model,
val_dataset,
steps=validation_steps,
verbose=verbose,
callbacks=callbacks)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for label, val_out in zip(out_labels, val_outs):
epoch_logs['val_' + label] = val_out
callbacks.on_epoch_end(epoch, epoch_logs)
if callbacks.model.stop_training:
break
model._successful_loop_finish = True
callbacks._call_end_hook(mode)
if model._compile_distribution:
# Copy the weights back from the replicated model to the original model.
dist_utils._copy_weights_to_original_model(model, ModeKeys.TRAIN)
scope.__exit__(None, None, None)
return model.history
def experimental_tpu_test_loop(model,
dataset,
verbose=0,
steps=None,
callbacks=None):
"""Test loop for evaluating with TPU tf.distribute.Strategy.
Arguments:
model: Keras Model instance.
dataset: Dataset for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
callbacks: List of callbacks to be called during evaluation
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the outputs.
"""
mode = ModeKeys.TEST
current_strategy = model._distribution_strategy
iterator = dist_utils.get_iterator(dataset, current_strategy)
scope = dist_utils.distributed_scope(
strategy=current_strategy, learning_phase=0)
scope.__enter__()
out_labels = model.metrics_names
def _test_step_fn(inputs):
"""A fn that returns output of single test step."""
if isinstance(inputs, (tuple, list)) and len(inputs) == 2:
inputs, targets = inputs
else:
targets = None
(distribution_strategy_context.get_replica_context().merge_call(
_build_model, args=(model, mode, inputs, targets)))
(_, outputs, updates, _) = _per_replica_execution_function(
dist_utils.get_distributed_model(model, mode), mode)
with ops.control_dependencies([updates]):
return [array_ops.identity(out) for out in outputs]
test_input_data = iterator.get_next()
per_replica_outputs = current_strategy.run(
_test_step_fn, args=(test_input_data,))
output_tensors = {}
for label, output in zip(out_labels, per_replica_outputs):
if label == 'loss':
reduce_op = ds_reduce_util.ReduceOp.SUM
else:
# We reduce all other metrics using mean for now. This is a temporary
# workaround until new metrics are in place.
reduce_op = ds_reduce_util.ReduceOp.MEAN
output_tensors[label] = current_strategy.reduce(reduce_op, output,
axis=None)
test_op = control_flow_ops.group(list(output_tensors.values()))
if verbose >= 1:
progbar = Progbar(target=steps)
if model._compile_distribution:
dist_utils._copy_weights_to_distributed_model(model, mode)
dist_utils._reset_metrics(model)
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=False,
epochs=1,
steps_per_epoch=steps,
verbose=verbose,
count_mode='steps',
mode=ModeKeys.TEST)
callbacks._call_begin_hook(mode)
outs = [0.] * len(model.metrics_names)
if steps is not None:
target_steps = steps
else:
raise ValueError('Number of steps could not be inferred from the data, '
'please pass the steps argument.')
current_step = 0
while current_step < target_steps:
batch_logs = {'batch': current_step, 'size': 1}
callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
try:
_, batch_outs = K.batch_get_value([test_op, output_tensors])
except errors.OutOfRangeError:
warning_msg = ('Make sure that your dataset can generate at least '
'`steps` batches (in this case, {} batches).'.format(steps))
logging.warning('Your dataset iterator ran out of data; '
'interrupting evaluation. ' + warning_msg)
target_steps = current_step
break
for i, label in enumerate(model.metrics_names):
if i == 0:
# Loss is a stateless metric.
outs[i] += batch_outs[label]
else:
# For all stateful metrics, the aggregation is handled by mirrored vars.
outs[i] = batch_outs[label]
batch_logs = cbks.make_logs(model, batch_logs, outs, mode)
callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)
if verbose == 1:
progbar.update(current_step + 1)
current_step += 1
if verbose >= 1:
# Progress bar finishes at the end.
progbar.update(target_steps)
callbacks._call_end_hook(mode)
scope.__exit__(None, None, None)
if len(outs) > 0:
outs[0] /= (target_steps)
if len(outs) == 1:
return outs[0]
return outs
def experimental_tpu_predict_loop(model,
dataset,
verbose=0,
steps=None,
callbacks=None):
"""Predict loop for predicting with TPU tf.distribute.Strategy.
Arguments:
model: Keras Model instance.
dataset: Dataset for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
callbacks: List of callbacks to be called during prediction
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
mode = ModeKeys.PREDICT
dataset_fully_shaped = dist_utils.is_dataset_shape_fully_defined(dataset)
padding_handler = None
if not dataset_fully_shaped:
# TODO(hongjunchoi): Investigate whether operations from
# PartialBatchPaddingHandler are unnecessarily pruned out
# during graph optimization.
padding_handler = padding_util.PartialBatchPaddingHandler(
model._feed_output_shapes)
batch_size, _, prefetch_buffer = input_lib._get_dataset_attributes(dataset)
padding_handler.padded_batch_size = batch_size
padding_handler.padding_mask = dataset.reduce(padding_handler.padding_mask,
padding_handler.update_mask)
dataset = dataset.map(padding_handler.pad_batch)
dataset = dataset.unbatch()
# At this point, it is guaranteed that the dataset does not
# have partial batches. Thus, we set `drop_remainder=True` to
# get static shape information about the elements in the dataset.
dataset = dataset.batch(batch_size, drop_remainder=True)
if prefetch_buffer is not None:
dataset = dataset.prefetch(prefetch_buffer)
current_strategy = model._distribution_strategy
iterator = dist_utils.get_iterator(dataset, current_strategy)
scope = dist_utils.distributed_scope(
strategy=current_strategy, learning_phase=0)
scope.__enter__()
def _predict_step_fn(inputs):
"""A fn that returns output of single prediction step."""
(distribution_strategy_context.get_replica_context().merge_call(
_build_model, args=(model, mode, inputs)))
(_, outputs, updates, _) = _per_replica_execution_function(
dist_utils.get_distributed_model(model, mode), mode)
with ops.control_dependencies([updates]):
return [array_ops.identity(out) for out in outputs]
# TODO(hongjunchoi): When a numpy array is passed as an input to `predict()`,
# use the numpy arrays directly to avoid accumulating unnecessary input
# pipeline ops.
predict_input_data = iterator.get_next()
per_replica_outputs = current_strategy.run(
_predict_step_fn, args=(predict_input_data,))
output_tensors = dist_utils.flatten_per_replica_values(
current_strategy, per_replica_outputs)
if verbose >= 1:
progbar = Progbar(target=steps)
if model._compile_distribution:
dist_utils._copy_weights_to_distributed_model(model, mode)
dist_utils._reset_metrics(model)
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=False,
epochs=1,
steps_per_epoch=steps,
verbose=verbose,
count_mode='steps',
mode=mode)
callbacks._call_begin_hook(mode)
# Since we do not know how many samples we will see, we cannot pre-allocate
# the returned Numpy arrays. Instead, we store one array per batch seen
# and concatenate them upon returning.
num_model_outputs = len(model.output_names)
unconcatenated_outs = [[] for _ in range(num_model_outputs)]
if steps is not None:
target_steps = steps
else:
raise ValueError('Number of steps could not be inferred from the data, '
'please pass the steps argument.')
current_step = 0
while current_step < target_steps:
batch_logs = {'batch': current_step, 'size': 1}
callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
try:
predict_ops = control_flow_ops.group(output_tensors)
_, batch_outs = K.batch_get_value([predict_ops, output_tensors])
except errors.OutOfRangeError:
warning_msg = ('Make sure that your dataset can generate at least '
'`steps` batches (in this case, {} batches).'.format(steps))
logging.warning('Your dataset iterator ran out of data; '
'interrupting evaluation. ' + warning_msg)
break
# TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy.
for i in range(num_model_outputs):
output_start_index = i * current_strategy.num_replicas_in_sync
output_end_index = (
output_start_index + current_strategy.num_replicas_in_sync)
single_model_output = batch_outs[output_start_index:output_end_index]
unconcatenated_outs[i].extend(single_model_output)
batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)
if verbose == 1:
progbar.update(current_step + 1)
current_step += 1
if verbose >= 1:
# Progress bar finishes at the end.
progbar.update(current_step)
callbacks._call_end_hook(mode)
scope.__exit__(None, None, None)
if len(unconcatenated_outs) == 1:
prediction_result = np.concatenate(unconcatenated_outs[0], axis=0)
else:
prediction_result = [
np.concatenate(out, axis=0) for out in unconcatenated_outs
]
if padding_handler:
prediction_result = padding_handler.apply_mask(prediction_result)
return prediction_result
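# The padding logic above exists because TPU execution needs statically shaped
# batches: the final partial batch is padded up to `batch_size` before prediction
# and the padded rows are dropped again via `padding_handler.apply_mask`. A minimal
# numpy sketch of that round trip (illustrative only; it does not use the real
# PartialBatchPaddingHandler API):
def _pad_and_unpad_example(samples, batch_size):
  """Pad `samples` to a multiple of `batch_size`, then recover the original rows."""
  num_samples = samples.shape[0]
  pad_rows = (batch_size - num_samples % batch_size) % batch_size
  padded = np.concatenate(
      [samples, np.zeros((pad_rows,) + samples.shape[1:], dtype=samples.dtype)])
  mask = np.arange(padded.shape[0]) < num_samples
  # "Predictions" would be computed on `padded`; the mask drops the padded rows.
  return padded[mask]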
class DistributionSingleWorkerTrainingLoop(training_utils.TrainingLoop):
"""Training loop for distribution strategy with single worker."""
def fit(self,
model,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1,
**kwargs):
"""Fit loop for Distribution Strategies."""
dist_utils.validate_callbacks(input_callbacks=callbacks,
optimizer=model.optimizer)
dist_utils.validate_inputs(x, y)
batch_size, steps_per_epoch = dist_utils.process_batch_and_step_size(
model._distribution_strategy,
x,
batch_size,
steps_per_epoch,
ModeKeys.TRAIN,
validation_split=validation_split)
batch_size = model._validate_or_infer_batch_size(
batch_size, steps_per_epoch, x)
dataset = model._distribution_standardize_user_data(
x, y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
validation_split=validation_split,
shuffle=shuffle,
epochs=epochs)
if not dist_utils.is_distributing_by_cloning(model):
with model._distribution_strategy.scope():
(dataset, _, _) = model._standardize_user_data(
dataset,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
validation_split=validation_split,
shuffle=shuffle)
val_dataset = None
if validation_data:
val_x, val_y, val_sample_weights = training_utils.unpack_validation_data(
validation_data)
dist_utils.validate_inputs(val_x, val_y)
_, validation_steps = dist_utils.process_batch_and_step_size(
model._distribution_strategy, val_x, batch_size, validation_steps,
ModeKeys.TEST)
val_dataset = model._distribution_standardize_user_data(
val_x, val_y,
sample_weight=val_sample_weights,
class_weight=None,
batch_size=batch_size,
validation_split=validation_split,
shuffle=shuffle,
allow_partial_batch=True)
elif validation_split:
raise ValueError('validation_split argument is not supported with '
'distribution strategies.')
if dist_utils.is_tpu_strategy(model._distribution_strategy):
steps_per_epoch = training_utils.infer_steps_for_dataset(
model, dataset, steps_per_epoch, epochs, steps_name='steps_per_epoch')
if steps_per_epoch is None:
raise ValueError('Number of steps could not be inferred from the data, '
'please pass the steps_per_epoch argument.')
if not context.executing_eagerly():
# Run TPU training in a custom loop in graph mode.
return experimental_tpu_fit_loop(
model,
dataset,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_dataset=val_dataset,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq)
return training_arrays.fit_loop(
model,
dataset,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_dataset,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq,
steps_name='steps_per_epoch')
def evaluate(self,
model,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
**kwargs):
"""Evaluate loop for Distribution Strategies."""
dist_utils.validate_inputs(x, y)
batch_size, steps = dist_utils.process_batch_and_step_size(
model._distribution_strategy, x, batch_size, steps, ModeKeys.TEST)
batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
dataset = model._distribution_standardize_user_data(
x, y,
sample_weight=sample_weight,
batch_size=batch_size,
allow_partial_batch=True)
if dist_utils.is_tpu_strategy(model._distribution_strategy):
steps = training_utils.infer_steps_for_dataset(
model, dataset, steps, steps_name='steps')
if steps is None:
raise ValueError('Number of steps could not be inferred from the data, '
'please pass the steps argument.')
if not context.executing_eagerly():
# Run TPU evaluation in a custom loop in graph mode.
return experimental_tpu_test_loop(
model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
return training_arrays.test_loop(
model,
inputs=dataset,
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks)
def predict(self,
model,
x,
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
**kwargs):
"""Predict loop for Distribution Strategies."""
dist_utils.validate_inputs(x=x, y=None)
batch_size, steps = dist_utils.process_batch_and_step_size(
model._distribution_strategy, x, batch_size, steps, ModeKeys.PREDICT)
batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
dataset = model._distribution_standardize_user_data(
x,
batch_size=batch_size,
allow_partial_batch=True)
if dist_utils.is_tpu_strategy(model._distribution_strategy):
steps = training_utils.infer_steps_for_dataset(
model, dataset, steps, steps_name='steps')
if steps is None:
raise ValueError('Number of steps could not be inferred from the data, '
'please pass the steps argument.')
if not context.executing_eagerly():
return experimental_tpu_predict_loop(
model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
return training_arrays.predict_loop(
model,
dataset,
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks)
def _train_with_multi_worker(method):
"""Decorator that handles multi worker training with distribution strategy."""
def wrapper(model, **kwargs):
def _worker_fn(_):
callbacks = kwargs.pop('callbacks', None)
filtered_callbacks = dist_utils.filter_distributed_callbacks(
callbacks, model)
kwargs['callbacks'] = filtered_callbacks
return method(model, **kwargs)
return dc.run_distribute_coordinator(
_worker_fn,
model._distribution_strategy,
mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
return wrapper
class DistributionMultiWorkerTrainingLoop(training_utils.TrainingLoop):
"""Training loop for distribution strategy with multiple worker."""
def __init__(self, single_worker_loop):
self._single_worker_loop = single_worker_loop
def fit(self, *args, **kwargs):
return _train_with_multi_worker(self._single_worker_loop.fit)(
*args, **kwargs)
def evaluate(self, *args, **kwargs):
return _train_with_multi_worker(self._single_worker_loop.evaluate)(
*args, **kwargs)
def predict(self, *args, **kwargs):
# Currently predict is still using the single worker implementation.
return self._single_worker_loop.predict(*args, **kwargs)
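# For context, these loops are reached from the public Keras API when a model is
# compiled and fit inside a `tf.distribute` strategy scope. A minimal sketch using
# the public API (illustrative only; this helper is not part of the module):
def _example_fit_under_strategy():
  import numpy as np  # local imports keep the sketch self-contained
  import tensorflow as tf
  strategy = tf.distribute.MirroredStrategy()
  with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer='sgd', loss='mse')
  x = np.random.rand(64, 4).astype('float32')
  y = np.random.rand(64, 1).astype('float32')
  dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(8)
  # With a distribution strategy attached, this call is dispatched to
  # DistributionSingleWorkerTrainingLoop.fit (or the multi-worker wrapper).
  model.fit(dataset, epochs=2, steps_per_epoch=8)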
|
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 12:57:46 2016
@author: zehl
"""
import datetime
import os
import sys
import traceback
from PyQt5 import QtCore, QtWidgets, QtGui
# import wizards
from .compsectionwiz import CompSectionWizard
from .converterwiz import ConversionWizard
from .filterwiz import FilterWizard
from .generatetemplatewiz import GenerateTemplateWizard
from .mergewiz import MergeWizard
from .wizutils import get_graphic_path
from odmltables import VERSION
def handle_exception(exc_type, exc_value, exc_traceback):
""" handle all exceptions """
error_logfile = os.path.join(os.path.expanduser("~"),
'.odmltables',
'error.log')
## KeyboardInterrupt is a special case.
## We don't raise the error dialog when it occurs.
if issubclass(exc_type, KeyboardInterrupt):
if QtWidgets.qApp:
QtWidgets.qApp.quit()
return
filename, lineid, func, line = traceback.extract_tb(exc_traceback).pop()
filename = os.path.basename(filename)
error = "%s: %s" % (exc_type.__name__, exc_value)
complete_error = "".join(traceback.format_exception(exc_type,
exc_value,
exc_traceback))
msg_text = ("<html><b>%s</b><br><br>"
"Please check your odMLtables settings and inputfiles for "
"consistency. In case you found a bug in odMLtables please "
"contact the odMLtables team "
"<i>https://github.com/INM-6/python-odmltables/issues</i>."
"<br><br>"
"For a detailed error report see log file at <i>%s</i>"
"</html>" % (
error.replace('<', '').replace('>', ''), error_logfile))
QtWidgets.QMessageBox.critical(None,
"Unexpected Error in odMLtables",
msg_text)
print("An error occurred. This is the full error report:")
print()
print(complete_error)
now = str(datetime.datetime.now())
errorpath = os.path.dirname(error_logfile)
if not os.path.exists(errorpath):
os.makedirs(errorpath)
with open(error_logfile, "a+") as myfile:
myfile.writelines(['################### %s ###################\n' % now,
complete_error, '\n'])
# sys.exit(1)
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.initUI()
# install handler for exceptions
sys.excepthook = handle_exception
def initUI(self):
centralWidget = QtWidgets.QWidget()
w, h = 450, 450
self.setFixedSize(w, h)
self.setCentralWidget(centralWidget)
# background color
centralWidget.setAutoFillBackground(True)
p = centralWidget.palette()
gradient = QtGui.QRadialGradient(w / 2, h / 3, h / 1, w / 2, h)
# gradient = QtGui.QRadialGradient(w / 2, h / 2, h / 1, w / 2, h)
gradient.setColorAt(0.0, QtGui.QColor(240, 240, 240))
gradient.setColorAt(1.0, QtGui.QColor(0, 76, 153))
p.setBrush(QtGui.QPalette.Window, QtGui.QBrush(gradient))
# p.setColor(centralWidget.backgroundRole(),QtGui.QColor(153,204,255))
# (255,128,0) # for button background
centralWidget.setPalette(p)
vbox = QtWidgets.QVBoxLayout()
titlebox = QtWidgets.QHBoxLayout()
vbox.addLayout(titlebox)
subtitlebox = QtWidgets.QVBoxLayout()
titlebox.addLayout(subtitlebox)
subtitlebox.addSpacing(8)
title_font = QtGui.QFont()
# title_font.setFamily("Verdana")
title_font.setBold(True)
title_font.setPointSize(14)
label = QtWidgets.QLabel(f"odMLtables version {VERSION}")
label.setFont(title_font)
pal = QtGui.QPalette(label.palette())
pal.setColor(QtGui.QPalette.WindowText, QtGui.QColor(QtCore.Qt.black))
label.setPalette(pal)
subtitlebox.addWidget(label)
subtitlebox.addSpacing(5)
subtitle = QtWidgets.QLabel('Select one of the actions below')
subtitle.setPalette(pal)
subtitlebox.addWidget(subtitle)
# subtitlebox.addSpacing(10)
grid = QtWidgets.QGridLayout()
grid.setColumnStretch(0, 1)
grid.setColumnStretch(1, 1)
vbox.addLayout(grid)
self.convertbutton = self.generate_button('Convert between odml\nand '
'table format',
"convertodml.svg")
self.comparebutton = self.generate_button('Compare entries within\nan '
'odml',
"comparetable.svg")
self.generatebutton = self.generate_button('Generate new table',
"createtemplate.svg")
self.filterbutton = self.generate_button('Filter content of odml\n',
"filterodml.svg")
self.mergebutton = self.generate_button('Merge contents of odmls\n',
"mergeodml.svg")
icon = QtWidgets.QLabel()
# icon.setGeometry(10, 10, 4, 100)
logo_filename = "odMLtables_100x100.png"
logo_dirs = [os.path.join(os.path.dirname(__file__), '..', '..', 'logo'),
os.path.join(sys.prefix, 'share/pixmaps')]
for logo_dir in logo_dirs:
filepath = os.path.join(logo_dir, logo_filename)
if os.path.exists(filepath):
icon.setPixmap(QtGui.QPixmap(filepath))
grid.addWidget(self.convertbutton, 0, 0, 1, 2, QtCore.Qt.AlignCenter)
grid.addWidget(self.comparebutton, 1, 1)
grid.addWidget(self.generatebutton, 1, 0)
grid.addWidget(self.filterbutton, 2, 1)
grid.addWidget(self.mergebutton, 2, 0)
titlebox.addWidget(icon)
self.setGeometry(300, 300, 350, 250)
self.setWindowTitle('odMLtables')
centralWidget.setLayout(vbox)
self.show()
def generate_button(self, text, graphic_name):
graphic_path = get_graphic_path()
button = QtWidgets.QToolButton()
button.setText(self.tr(text))
button.setIcon(QtGui.QIcon(os.path.join(graphic_path, graphic_name)))
button.setIconSize(QtCore.QSize(120, 60))
button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
button.setFixedWidth(200)
button.setFixedHeight(100)
button.clicked.connect(self.startWizard)
button.setStyleSheet(
'QToolButton {'
'background-color:#FF9955;'
'border: 2px solid #404040;'
'border-radius: 5px;'
'color: black};' # 'FF7F2A'
'QToolButton:hover{'
'background-color:red};'
)
return button
def startWizard(self):
sender = self.sender()
if sender == self.convertbutton:
wizard = ConversionWizard()
elif sender == self.comparebutton:
wizard = CompSectionWizard()
elif sender == self.generatebutton:
wizard = GenerateTemplateWizard()
elif sender == self.filterbutton:
wizard = FilterWizard()
elif sender == self.mergebutton:
wizard = MergeWizard()
else:
raise EnvironmentError('Unknown sender')
wizard.exec_()
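# A minimal launcher sketch for the window defined above (illustrative only; the
# installed odMLtables package ships its own console entry point, which may differ):
def _example_run_gui():
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow()  # constructing it also installs handle_exception as sys.excepthook
    return app.exec_()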
|
|
import libtaxii.messages_10 as tm10
from . import constants as const
from .abstract import AbstractClient
from .converters import (
to_subscription_response_entity, to_content_block_entity,
to_collection_entities
)
from .exceptions import NotSupportedError
from .utils import (
pack_content_bindings, get_utc_now, pack_content_binding
)
class Client10(AbstractClient):
'''Client implementation for TAXII Specification v1.0
Use :py:meth:`cabby.create_client` to create client instances.
'''
taxii_binding = const.XML_10_BINDING
services_version = const.TAXII_SERVICES_10
def _discovery_request(self, uri):
request = tm10.DiscoveryRequest(message_id=self._generate_id())
response = self._execute_request(request, uri=uri)
return response
def __subscription_status_request(self, action, collection_name,
subscription_id=None, uri=None):
request_parameters = dict(
message_id=self._generate_id(),
action=action,
feed_name=collection_name,
subscription_id=subscription_id
)
request = tm10.ManageFeedSubscriptionRequest(**request_parameters)
response = self._execute_request(
request, uri=uri, service_type=const.SVC_FEED_MANAGEMENT)
return to_subscription_response_entity(response, version=10)
def get_subscription_status(self, collection_name, subscription_id=None,
uri=None):
'''Get subscription status from TAXII Feed Management service.
Sends a subscription request with action `STATUS`.
If no ``subscription_id`` is provided, the server will return
the list of all available subscriptions for the feed named in
``collection_name``.
If ``uri`` is not provided, the client will try to discover services and
find the Feed Management Service among them.
:param str collection_name: target feed name
:param str subscription_id: subscription ID (optional)
:param str uri: URI path to a specific Collection Management service
:return: subscription information response
:rtype: :py:class:`cabby.entities.SubscriptionResponse`
:raises ValueError:
if URI provided is invalid or schema is not supported
:raises `cabby.exceptions.HTTPError`:
if HTTP error happened
:raises `cabby.exceptions.UnsuccessfulStatusError`:
if Status Message received and status_type is not `SUCCESS`
:raises `cabby.exceptions.ServiceNotFoundError`:
if no service found
:raises `cabby.exceptions.AmbiguousServicesError`:
more than one service with type specified
:raises `cabby.exceptions.NoURIProvidedError`:
no URI provided and client can't discover services
'''
return self.__subscription_status_request(
const.ACT_STATUS, collection_name, subscription_id=subscription_id,
uri=uri)
def unsubscribe(self, collection_name, subscription_id, uri=None):
'''Unsubscribe from a subscription.
Sends a subscription request with action `UNSUBSCRIBE`.
Subscription is identified by ``collection_name`` and
``subscription_id``.
If ``uri`` is not provided, the client will try to discover services and
find the Collection Management Service among them.
:param str collection_name: target feed name
:param str subscription_id: subscription ID
:param str uri: URI path to a specific TAXII service
:return: subscription information response
:rtype: :py:class:`cabby.entities.SubscriptionResponse`
:raises ValueError:
if URI provided is invalid or schema is not supported
:raises `cabby.exceptions.HTTPError`:
if HTTP error happened
:raises `cabby.exceptions.UnsuccessfulStatusError`:
if Status Message received and status_type is not `SUCCESS`
:raises `cabby.exceptions.ServiceNotFoundError`:
if no service found
:raises `cabby.exceptions.AmbiguousServicesError`:
more than one service with type specified
:raises `cabby.exceptions.NoURIProvidedError`:
no URI provided and client can't discover services
'''
return self.__subscription_status_request(
const.ACT_UNSUBSCRIBE, collection_name,
subscription_id=subscription_id, uri=uri)
def subscribe(self, collection_name, inbox_service=None,
content_bindings=None, uri=None, count_only=False):
'''Create a subscription.
Sends a subscription request with action `SUBSCRIBE`.
If ``uri`` is not provided, the client will try to discover services and
find the Collection Management Service among them.
Content Binding subtypes are not supported in TAXII Specification v1.0.
:param str collection_name: target feed name
:param `cabby.entities.InboxService` inbox_service:
Inbox Service that will accept content pushed by TAXII Server
in the context of this subscription
:param list content_bindings: a list of strings or
:py:class:`cabby.entities.ContentBinding` entities
:param str uri: URI path to a specific Collection Management service
:param bool count_only: IGNORED. Count Only is not supported in
TAXII 1.0 and is added here only for method unification purposes.
:return: subscription information response
:rtype: :py:class:`cabby.entities.SubscriptionResponse`
:raises ValueError:
if URI provided is invalid or schema is not supported
:raises `cabby.exceptions.HTTPError`:
if HTTP error happened
:raises `cabby.exceptions.UnsuccessfulStatusError`:
if Status Message received and status_type is not `SUCCESS`
:raises `cabby.exceptions.ServiceNotFoundError`:
if no service found
:raises `cabby.exceptions.AmbiguousServicesError`:
more than one service with type specified
:raises `cabby.exceptions.NoURIProvidedError`:
no URI provided and client can't discover services
'''
request_parameters = dict(
message_id=self._generate_id(),
action=const.ACT_SUBSCRIBE,
feed_name=collection_name,
)
if inbox_service:
binding = (inbox_service.message_bindings[0]
if inbox_service.message_bindings else '')
delivery_parameters = tm10.DeliveryParameters(
inbox_protocol=inbox_service.protocol,
inbox_address=inbox_service.address,
delivery_message_binding=binding
)
if content_bindings:
delivery_parameters['content_bindings'] = [
tm10.ContentBinding(cb.binding_id)
for cb in content_bindings]
request_parameters['delivery_parameters'] = delivery_parameters
request = tm10.ManageFeedSubscriptionRequest(**request_parameters)
response = self._execute_request(
request, uri=uri, service_type=const.SVC_FEED_MANAGEMENT)
return to_subscription_response_entity(response, version=10)
def push(self, content, content_binding, uri=None, timestamp=None):
'''Push content into Inbox Service.
If ``uri`` is not provided, the client will try to discover services and
find the Inbox Service among them.
Content Binding subtypes and Destination collections are not
supported in TAXII Specification v1.0.
:param str content: content to push
:param content_binding: content binding for a content
:type content_binding: string or
:py:class:`cabby.entities.ContentBinding`
:param datetime timestamp: timestamp label of the content block
(current UTC time by default)
:param str uri: URI path to a specific Inbox Service
:raises ValueError:
if URI provided is invalid or schema is not supported
:raises `cabby.exceptions.HTTPError`:
if HTTP error happened
:raises `cabby.exceptions.UnsuccessfulStatusError`:
if Status Message received and status_type is not `SUCCESS`
:raises `cabby.exceptions.ServiceNotFoundError`:
if no service found
:raises `cabby.exceptions.AmbiguousServicesError`:
more than one service with type specified
:raises `cabby.exceptions.NoURIProvidedError`:
no URI provided and client can't discover services
'''
content_block = tm10.ContentBlock(
content=content,
content_binding=pack_content_binding(content_binding, version=10),
timestamp_label=timestamp or get_utc_now()
)
inbox_message = tm10.InboxMessage(message_id=self._generate_id(),
content_blocks=[content_block])
self._execute_request(inbox_message, uri=uri,
service_type=const.SVC_INBOX)
self.log.debug("Content block successfully pushed")
def get_collections(self, uri=None):
'''Get collections from Feed Management Service.
If ``uri`` is not provided, the client will try to discover services and
find the Feed Management Service among them.
:param str uri: URI path to a specific Feed Management service
:return: list of collections
:rtype: list of :py:class:`cabby.entities.Collection`
:raises ValueError:
if URI provided is invalid or schema is not supported
:raises `cabby.exceptions.HTTPError`:
if HTTP error happened
:raises `cabby.exceptions.UnsuccessfulStatusError`:
if Status Message received and status_type is not `SUCCESS`
:raises `cabby.exceptions.ServiceNotFoundError`:
if no service found
:raises `cabby.exceptions.AmbiguousServicesError`:
more than one service with type specified
:raises `cabby.exceptions.NoURIProvidedError`:
no URI provided and client can't discover services
'''
request = tm10.FeedInformationRequest(message_id=self._generate_id())
response = self._execute_request(
request, uri=uri, service_type=const.SVC_FEED_MANAGEMENT)
return to_collection_entities(response.feed_informations, version=10)
def get_content_count(self, *args, **kwargs):
'''Not supported in TAXII 1.0
:raises `cabby.exceptions.NotSupportedError`:
not supported in TAXII 1.0
'''
raise NotSupportedError(self.taxii_version)
def poll(self, collection_name, begin_date=None, end_date=None,
subscription_id=None, content_bindings=None, uri=None):
'''Poll content from Polling Service.
If ``uri`` is not provided, the client will try to discover services and
find the Polling Service among them.
:param str collection_name: feed to poll
:param datetime begin_date:
ask only for content blocks created after
`begin_date` (exclusive)
:param datetime end_date:
ask only for content blocks created before
`end_date` (inclusive)
:param str subscription_id: ID of the existing subscription
:param list content_bindings:
list of strings or
:py:class:`cabby.entities.ContentBinding` objects
:param str uri: URI path to a specific Poll Service
:raises ValueError:
if URI provided is invalid or schema is not supported
:raises `cabby.exceptions.HTTPError`:
if HTTP error happened
:raises `cabby.exceptions.UnsuccessfulStatusError`:
if Status Message received and status_type is not `SUCCESS`
:raises `cabby.exceptions.ServiceNotFoundError`:
if no service found
:raises `cabby.exceptions.AmbiguousServicesError`:
more than one service with type specified
:raises `cabby.exceptions.NoURIProvidedError`:
no URI provided and client can't discover services
'''
_bindings = pack_content_bindings(content_bindings, version=10)
data = dict(
message_id=self._generate_id(),
feed_name=collection_name,
exclusive_begin_timestamp_label=begin_date,
inclusive_end_timestamp_label=end_date,
content_bindings=_bindings
)
if subscription_id:
data['subscription_id'] = subscription_id
request = tm10.PollRequest(**data)
stream = self._execute_request(request, uri=uri,
service_type=const.SVC_POLL)
for obj in stream:
if isinstance(obj, tm10.ContentBlock):
yield to_content_block_entity(obj)
def fulfilment(self, *args, **kwargs):
'''Not supported in TAXII 1.0
:raises `cabby.exceptions.NotSupportedError`:
not supported in TAXII 1.0
'''
raise NotSupportedError(self.taxii_version)
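# A short usage sketch for the client above (illustrative only; it assumes a
# reachable TAXII 1.0 server, and the hostname and paths are placeholders --
# `cabby.create_client` is the entry point referenced in the class docstring):
def _example_poll_feed():
    from cabby import create_client
    client = create_client('taxii.example.com',
                           discovery_path='/services/discovery',
                           version='1.0')
    for block in client.poll('example-feed'):
        print(block.content)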
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import random
import re
import threading
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, te
from tvm.topi.math import cast
def randint_loguniform(low=1, high=32768, size=None):
logN = np.random.uniform(low=np.log(low), high=np.log(high), size=size)
N = np.exp(logN).astype(int)
return np.unique(N)
dtype = tvm.testing.parameter("float32", "int32", "float16", "int8")
fuzz_arr_size = tvm.testing.parameter(*randint_loguniform(size=25))
# Explicitly specify a target, as this test is looking at the
# generated shader code, and is not running on an actual device.
@tvm.testing.parametrize_targets(
" ".join(
[
"vulkan",
"-supports_int8=1",
"-supports_8bit_buffer=1",
"-supports_storage_buffer_storage_class=1",
"-supports_float16=1",
"-supports_16bit_buffer=1",
]
)
)
def test_vector_comparison(target, dtype):
n = (1024,)
A = te.placeholder(n, dtype=dtype, name="A")
B = te.compute(
A.shape,
lambda i: tvm.tir.Select(
A[i] >= 0, A[i] + tvm.tir.const(1, dtype), tvm.tir.const(0, dtype)
),
name="B",
)
s = te.create_schedule(B.op)
(bx, tx) = s[B].split(s[B].op.axis[0], factor=128)
(tx, vx) = s[B].split(tx, factor=4)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
s[B].vectorize(vx)
f = tvm.build(s, [A, B], target)
# Verify we generate the boolx4 type declaration and the OpSelect
# v4{float,half,int} instruction
assembly = f.imported_modules[0].get_source()
matches = re.findall("%v4bool = OpTypeVector %bool 4", assembly)
assert len(matches) == 1
matches = re.findall("OpSelect %v4.*", assembly)
assert len(matches) == 1
def test_array_copy(dev, dtype, fuzz_arr_size):
a_np = np.random.uniform(size=(fuzz_arr_size,)).astype(dtype)
a = tvm.nd.empty((fuzz_arr_size,), dtype, dev).copyfrom(a_np)
b_np = a.numpy()
tvm.testing.assert_allclose(a_np, b_np)
tvm.testing.assert_allclose(a_np, a.numpy())
@tvm.testing.exclude_targets("llvm")
def test_array_vectorize_add(target, dev, dtype):
arr_size = 64
lanes = 2
num_thread = 8
A = te.placeholder((arr_size,), name="A", dtype="%sx%d" % (dtype, lanes))
B = te.compute((arr_size,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, B], target)
a = tvm.nd.empty((arr_size,), A.dtype, dev).copyfrom(np.random.uniform(size=(arr_size, lanes)))
c = tvm.nd.empty((arr_size,), B.dtype, dev)
fun(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
@tvm.testing.parametrize_targets("vulkan")
def test_vulkan_stress(target, dev):
"""
Launch a randomized test with multiple kernels per stream, multiple uses of
kernels per stream, over multiple threads.
"""
n = 1024
num_thread = 64
def run_stress():
def worker():
A = te.placeholder((n,), name="A", dtype="float32")
B = te.placeholder((n,), name="B", dtype="float32")
functions = [
(
lambda: te.compute((n,), lambda i: 2 * A[i] + 3 * B[i]),
lambda a, b: 2 * a + 3 * b,
),
(lambda: te.compute((n,), lambda i: A[i] + B[i]), lambda a, b: a + b),
(lambda: te.compute((n,), lambda i: A[i] + 2 * B[i]), lambda a, b: a + 2 * b),
]
def build_f(f_ref):
(C_f, ref) = f_ref
C = C_f()
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(xi, te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, B, C], target)
return (fun, ref)
fs = [
build_f(random.choice(functions)) for _ in range(np.random.randint(low=1, high=10))
]
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
b = tvm.nd.empty((n,), B.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
cs = [tvm.nd.empty((n,), A.dtype, dev) for _ in fs]
for ((f, _), c) in zip(fs, cs):
f(a, b, c)
for ((_, ref), c) in zip(fs, cs):
tvm.testing.assert_allclose(c.numpy(), ref(a.numpy(), b.numpy()))
ts = [threading.Thread(target=worker) for _ in range(np.random.randint(1, 10))]
for t in ts:
t.start()
for t in ts:
t.join()
run_stress()
@tvm.testing.exclude_targets("llvm")
def test_vulkan_bool_load(target, dev):
arr_size = 1024
target = tvm.target.Target(target)
if target.kind.name == "vulkan":
supports_int8_buffer = target.attrs.get("supports_int8", False) and target.attrs.get(
"supports_8bit_buffer", False
)
if not supports_int8_buffer:
pytest.xfail(
"Vulkan target does not support int8 buffer access, used to transfer booleans"
)
def do_copy(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
max_threads = 32
ib.scope_attr(bx, "thread_extent", tvm.tir.indexdiv(n + max_threads - 1, max_threads))
ib.scope_attr(tx, "thread_extent", max_threads)
tid = bx * max_threads + tx
with ib.if_scope(tid < n):
B[tid] = cast(A[tid], "int32")
return ib.get()
A = te.placeholder((arr_size,), name="A", dtype="bool")
B = te.placeholder((arr_size,), name="B", dtype="int32")
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_copy(ins[0], outs[0], arr_size),
name="bool_copy_ir",
dtype="int32",
)
s = te.create_schedule(B.op)
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B], target)
a_np = np.random.uniform(size=arr_size) > 0.5
b_np = np.zeros((arr_size,), dtype="int32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
func(a, b)
ref = a_np.astype(np.int32)
tvm.testing.assert_allclose(b.numpy(), ref)
def check_mod(target, dev, mod, x_np, res_np):
res = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(x_np).numpy()
tvm.testing.assert_allclose(res, res_np, atol=1e-5)
def test_sqrt(target, dev):
# Three 32-bit push constants: any_dim, stride, stride
dtype = "float32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.sqrt(x))
x_np = np.random.uniform(size=(10,)).astype(dtype)
res_np = np.sqrt(x_np)
check_mod(target, dev, mod, x_np, res_np)
def test_argsort(target, dev):
# One 64-bit and one 32-bit constant
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.argsort(x))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.argsort(x_np)
check_mod(target, dev, mod, x_np, res_np)
def test_cumsum(target, dev):
# One 64-bit and one 32-bit constant
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.cumsum(x))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.cumsum(x_np)
check_mod(target, dev, mod, x_np, res_np)
def test_unique(target, dev):
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
[unique, _, _, num_unique] = relay.unique(x, is_sorted=True)
mod["main"] = relay.Function([x], relay.op.strided_slice(unique, begin=[0], end=num_unique))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.unique(x_np)
check_mod(target, dev, mod, x_np, res_np)
vulkan_parameter_impl = tvm.testing.parameter("push_constants", "ubo")
vulkan_parameter_dtype = tvm.testing.parameter("int32", "float32", "int64")
# Only run on vulkan because extremely large numbers of input
# parameters can crash cuda/llvm compiler.
@tvm.testing.parametrize_targets("vulkan -from_device=0")
def test_vulkan_constant_passing(target, dev, vulkan_parameter_impl, vulkan_parameter_dtype):
target = tvm.target.Target(target)
dtype = vulkan_parameter_dtype
if not target.attrs.get("supports_int64", False):
pytest.xfail("Vulkan target does not support Int64 variables")
# f_add has 3+num_int_params scalar parameters. The other three
# are length_n, stride1, and stride2.
if vulkan_parameter_impl == "push_constants":
# 4 params, 32 bytes. Within 128-byte spec-guaranteed size of
# push constants. Uses push constants.
num_int_params = 1
else:
# 24 params, 192 bytes. May be above spec-guaranteed size of 128
# bytes for push constants. Uses either push constants or UBO,
# depending on the device.
max_push_constants_size = int(target.attrs.get("max_push_constants_size", 128))
max_int_params_in_push = max_push_constants_size // 8 - 3
num_int_params = max_int_params_in_push + 1
n = te.var("n")
scalars = [te.var("scale{}".format(i), dtype=dtype) for i in range(num_int_params)]
scalar_sum = scalars[0]
for s in scalars[1:]:
scalar_sum += s
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.compute(A.shape, lambda i: scalar_sum + A[i], name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
f_add = tvm.build(s, scalars + [A, B], target)
n = 1024
scalars = np.array([1 for _ in scalars]).astype(dtype)
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
f_add(*scalars, a, b)
tvm.testing.assert_allclose(a.numpy() + sum(scalars), b.numpy())
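# A minimal sketch of the sizing arithmetic exercised above (illustrative
# helper, not used by the test): each scalar occupies an 8-byte slot next to
# the three implicit values (length_n and the two strides), so the number of
# user scalars that still fits in the push-constant budget is
# max_push_constants_size // 8 - 3.  With the spec-guaranteed 128 bytes that
# is 16 - 3 = 13; one extra scalar forces the UBO fallback path.
def _push_constant_budget(max_push_constants_size=128, bytes_per_param=8, implicit_params=3):
    """Return how many user scalars fit alongside the implicit parameters."""
    return max_push_constants_size // bytes_per_param - implicit_params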
def test_vulkan_while_if(target, dev):
target = tvm.target.Target(target)
def do_compute(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
if "gpu" in target.keys:
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
iterations = ib.allocate("int32", (1,), name="iterations", scope="local")
iterations[0] = 0
B[0] = 0
# WhileNode's condition is re-evaluated every loop. The
# if_then_else block introduces additional labels/blocks that
# must be kept separate from the WhileNode's block.
loop_condition = iterations[0] < tvm.tir.if_then_else(A[0] > 0, 10, 20)
with ib.while_loop(loop_condition):
iterations[0] += 1
B[0] += iterations[0]
return ib.get()
n = 1
dtype = "int32"
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_compute(ins[0], outs[0], n),
dtype=dtype,
)
s = te.create_schedule(B.op)
# Point of failure would be here, at tvm.build.
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B], target)
a = tvm.nd.array(np.array([5], dtype=A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), [55])
a = tvm.nd.array(np.array([-5], dtype=A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), [210])
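# Expected values above: with A[0] = 5 the if_then_else bound is 10, so
# B[0] = 1 + 2 + ... + 10 = 55; with A[0] = -5 the bound is 20, so
# B[0] = 1 + 2 + ... + 20 = 210.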
@tvm.testing.exclude_targets("llvm")
def test_vulkan_local_threadidx(target, dev):
# To access the thread index, the vulkan runtime accesses a global
# array of thread indices, storing the result in a local variable.
# In CUDA, these are the built-in threadIdx.x variables, which are
# globally accessible. In vulkan, these local variables must be
# defined inside a function, but are hoisted up to the function
# header to mimic the global CUDA semantics. Before this
# hoisting, this test could trigger spvValidate errors for
# potentially undeclared variables.
def do_compute(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
# One single declaration of te.thread_axis.
tx = te.thread_axis("threadIdx.x")
with ib.for_range(0, 1):
# Used inside a for-loop scope, defines local thread_id
# variable.
ib.scope_attr(tx, "thread_extent", 16)
B[tx + 0] = A[tx + 0]
with ib.for_range(0, 1):
# Used in next scope. If local variable defined at point
# of use instead of function header, will fail spvValidate
# for access of out-of-scope local variable.
ib.scope_attr(tx, "thread_extent", 16)
B[tx + 16] = A[tx + 16]
return ib.get()
n = te.var("n")
A = te.placeholder((n,), name="A", dtype="int32")
B = te.placeholder((n,), name="B", dtype="int32")
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_compute(ins[0], outs[0], n),
dtype="int32",
)
s = te.create_schedule(B.op)
# Expected failure occurs at build step.
func = tvm.build(s, [A, B], target)
n = 32
a_np = np.arange(n).astype(dtype=A.dtype)
b_np = np.zeros((n,), dtype="int32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), a_np)
class TestVectorizedIndices:
load_type, store_type = tvm.testing.parameters(
# Load N values, write to N locations.
# Vectorized copy.
("ramp", "ramp"),
# Load 1 value, write to N locations.
# Scalar load, vectorized store.
#
# Most TVM operations (e.g. schedule[tensor].vectorize(axis)) have
# the broadcast outside of the index, but it is semantically okay
# for the broadcast to be inside the index, and it shows up with
# some optimizations.
("broadcast", "ramp"),
# Load 1 value, write to 1 location.
# Broadcasting on both sides should be equivalent to a scalar copy.
("broadcast", "broadcast"),
# Load N values, write to 1 location.
# Disabled as it would have unclear semantics.
# ("ramp", "broadcast"),
)
indirect_indices = tvm.testing.parameter(True, False, ids=["reorder", "no_reorder"])
@tvm.testing.fixture
def ref_data(self, load_type, store_type, indirect_indices):
n = 4
index_map = {
"ramp": np.arange(n),
"broadcast": np.zeros(n, dtype="int32"),
}
a_np = np.random.randint(np.iinfo("int32").max, size=n).astype("int32")
b_np = np.zeros(shape=n, dtype=a_np.dtype)
reorder_np = np.arange(n, dtype="int32")[::-1]
load_index = index_map[load_type]
store_index = index_map[store_type]
if indirect_indices:
load_index = reorder_np[load_index]
b_np[store_index] = a_np[load_index]
return a_np, reorder_np, b_np
@tvm.testing.fixture
def mod(self, target, load_type, store_type, indirect_indices):
target = tvm.target.Target(target)
n = 4
dtype = "int32"
A = te.placeholder((n,), dtype=dtype, name="A")
R = te.placeholder((n,), dtype=dtype, name="R")
def do_compute(ins, outs):
ib = tvm.tir.ir_builder.create()
A, R = map(ib.buffer_ptr, ins)
B = ib.buffer_ptr(outs[0])
if "gpu" in target.keys:
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
index_map = {
"ramp": tvm.tir.Ramp(0, 1, 4),
"broadcast": tvm.tir.Broadcast(0, 4),
}
load_index = index_map[load_type]
store_index = index_map[store_type]
if indirect_indices:
load_index = tvm.tir.expr.Load("int32x4", R, load_index)
transfer = tvm.tir.expr.Load("int32x4", A, load_index)
ib.emit(tvm.tir.stmt.Store(B, transfer, store_index))
return ib.get()
B = te.extern(A.shape, [A, R], do_compute, dtype="int32")
s = te.create_schedule(B.op)
return tvm.lower(s, [A, R, B])
def test_ramp_broadcast_index(self, target, dev, mod, ref_data):
f = tvm.build(mod, target=target)
a_np, reorder_np, b_np = ref_data
a = tvm.nd.array(a_np, dev)
r = tvm.nd.array(reorder_np, dev)
b = tvm.nd.array(np.zeros(shape=b_np.shape, dtype="int32"), dev)
f(a, r, b)
tvm.testing.assert_allclose(b.numpy(), b_np)
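# Plain-numpy sketch of the load/store index combinations exercised above
# (illustrative only): a "ramp" index touches n consecutive elements, while a
# "broadcast" index repeats element 0.
#
#   a = np.arange(4); b = np.zeros(4, dtype="int32")
#   b[np.arange(4)] = a[np.arange(4)]                       # ramp/ramp: vectorized copy
#   b[np.arange(4)] = a[np.zeros(4, dtype=int)]             # broadcast load: splat a[0]
#   b[np.zeros(4, dtype=int)] = a[np.zeros(4, dtype=int)]   # broadcast/broadcast: scalar copy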
@tvm.testing.parametrize_targets("vulkan -max_shared_memory_per_block=16384")
def test_shared_mem_alloc(target, dev):
alloc_nbytes = 16384 * 2
def do_compute(ins, outs):
ib = tvm.tir.ir_builder.create()
out = ib.buffer_ptr(outs[0])
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
array = ib.allocate("int32", (alloc_nbytes,), name="array", scope="shared")
array[0] = 0
out[0] = array[0]
return ib.get()
Out = te.extern(
shape=(1,),
inputs=[],
fcompute=do_compute,
dtype="int32",
)
s = te.create_schedule(Out.op)
# Codegen should raise error when allocating more memory than the
# target supports.
with pytest.raises(tvm.TVMError):
tvm.build(s, [Out], target)
if __name__ == "__main__":
import sys
sys.exit(pytest.main([__file__] + sys.argv[1:]))
|
|
# -*- coding: utf-8 -*-
"""
sphinx.ext.graphviz
~~~~~~~~~~~~~~~~~~~
Allow graphviz-formatted graphs to be included in Sphinx-generated
documents inline.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import codecs
import posixpath
import re
from hashlib import sha1
from os import path
from subprocess import Popen, PIPE
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import ViewList
from six import text_type
import sphinx
from sphinx.errors import SphinxError
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.i18n import search_image_for_language
from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL
if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
logger = logging.getLogger(__name__)
class GraphvizError(SphinxError):
category = 'Graphviz error'
class ClickableMapDefinition(object):
"""A manipulator for clickable map file of graphviz."""
maptag_re = re.compile('<map id="(.*?)"')
href_re = re.compile('href=".*?"')
def __init__(self, filename, content, dot=''):
# type: (unicode, unicode, unicode) -> None
self.id = None # type: unicode
self.filename = filename
self.content = content.splitlines()
self.clickable = [] # type: List[unicode]
self.parse(dot=dot)
def parse(self, dot=None):
# type: (unicode) -> None
matched = self.maptag_re.match(self.content[0]) # type: ignore
if not matched:
raise GraphvizError('Invalid clickable map file found: %s' % self.filename)
self.id = matched.group(1)
if self.id == '%3':
# graphviz generates a wrong ID if the graph name is not specified
# https://gitlab.com/graphviz/graphviz/issues/1327
hashed = sha1(dot.encode('utf-8')).hexdigest()
self.id = 'graphviz%s' % hashed[-10:]
self.content[0] = self.content[0].replace('%3', self.id)
for line in self.content:
if self.href_re.search(line): # type: ignore
self.clickable.append(line)
def generate_clickable_map(self):
# type: () -> unicode
"""Generate clickable map tags if clickable item exists.
If not exists, this only returns empty string.
"""
if self.clickable:
return '\n'.join([self.content[0]] + self.clickable + [self.content[-1]])
else:
return ''
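# A minimal usage sketch (the map content below is hypothetical, not produced
# by this extension):
#
#   cmap = ClickableMapDefinition('example.map',
#                                 '<map id="%3">\n<area href="https://example.com/"/>\n</map>',
#                                 dot='digraph { a -> b }')
#   cmap.generate_clickable_map()  # returns the <map> markup, since an href exists
#
# When the content contains no href attributes, generate_clickable_map()
# returns an empty string and render_dot_html() falls back to a plain <img> tag.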
class graphviz(nodes.General, nodes.Inline, nodes.Element):
pass
def figure_wrapper(directive, node, caption):
# type: (Directive, nodes.Node, unicode) -> nodes.figure
figure_node = nodes.figure('', node)
if 'align' in node:
figure_node['align'] = node.attributes.pop('align')
parsed = nodes.Element()
directive.state.nested_parse(ViewList([caption], source=''),
directive.content_offset, parsed)
caption_node = nodes.caption(parsed[0].rawsource, '',
*parsed[0].children)
caption_node.source = parsed[0].source
caption_node.line = parsed[0].line
figure_node += caption_node
return figure_node
def align_spec(argument):
# type: (Any) -> bool
return directives.choice(argument, ('left', 'center', 'right'))
class Graphviz(Directive):
"""
Directive to insert arbitrary dot markup.
"""
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = False
option_spec = {
'alt': directives.unchanged,
'align': align_spec,
'caption': directives.unchanged,
'graphviz_dot': directives.unchanged,
'name': directives.unchanged,
}
def run(self):
# type: () -> List[nodes.Node]
if self.arguments:
document = self.state.document
if self.content:
return [document.reporter.warning(
__('Graphviz directive cannot have both content and '
'a filename argument'), line=self.lineno)]
env = self.state.document.settings.env
argument = search_image_for_language(self.arguments[0], env)
rel_filename, filename = env.relfn2path(argument)
env.note_dependency(rel_filename)
try:
with codecs.open(filename, 'r', 'utf-8') as fp:
dotcode = fp.read()
except (IOError, OSError):
return [document.reporter.warning(
__('External Graphviz file %r not found or reading '
'it failed') % filename, line=self.lineno)]
else:
dotcode = '\n'.join(self.content)
if not dotcode.strip():
return [self.state_machine.reporter.warning(
__('Ignoring "graphviz" directive without content.'),
line=self.lineno)]
node = graphviz()
node['code'] = dotcode
node['options'] = {}
if 'graphviz_dot' in self.options:
node['options']['graphviz_dot'] = self.options['graphviz_dot']
if 'alt' in self.options:
node['alt'] = self.options['alt']
if 'align' in self.options:
node['align'] = self.options['align']
caption = self.options.get('caption')
if caption:
node = figure_wrapper(self, node, caption)
self.add_name(node)
return [node]
class GraphvizSimple(Directive):
"""
Directive to insert arbitrary dot markup.
"""
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'alt': directives.unchanged,
'align': align_spec,
'caption': directives.unchanged,
'graphviz_dot': directives.unchanged,
'name': directives.unchanged,
}
def run(self):
# type: () -> List[nodes.Node]
node = graphviz()
node['code'] = '%s %s {\n%s\n}\n' % \
(self.name, self.arguments[0], '\n'.join(self.content))
node['options'] = {}
if 'graphviz_dot' in self.options:
node['options']['graphviz_dot'] = self.options['graphviz_dot']
if 'alt' in self.options:
node['alt'] = self.options['alt']
if 'align' in self.options:
node['align'] = self.options['align']
caption = self.options.get('caption')
if caption:
node = figure_wrapper(self, node, caption)
self.add_name(node)
return [node]
def render_dot(self, code, options, format, prefix='graphviz'):
# type: (nodes.NodeVisitor, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode]
"""Render graphviz code into a PNG or PDF output file."""
graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
hashkey = (code + str(options) + str(graphviz_dot) +
str(self.builder.config.graphviz_dot_args)).encode('utf-8')
fname = '%s-%s.%s' % (prefix, sha1(hashkey).hexdigest(), format)
relfn = posixpath.join(self.builder.imgpath, fname)
outfn = path.join(self.builder.outdir, self.builder.imagedir, fname)
if path.isfile(outfn):
return relfn, outfn
if (hasattr(self.builder, '_graphviz_warned_dot') and
self.builder._graphviz_warned_dot.get(graphviz_dot)):
return None, None
ensuredir(path.dirname(outfn))
# graphviz expects UTF-8 by default
if isinstance(code, text_type):
code = code.encode('utf-8')
dot_args = [graphviz_dot]
dot_args.extend(self.builder.config.graphviz_dot_args)
dot_args.extend(['-T' + format, '-o' + outfn])
if format == 'png':
dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])
try:
p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
except OSError as err:
if err.errno != ENOENT: # No such file or directory
raise
logger.warning(__('dot command %r cannot be run (needed for graphviz '
'output), check the graphviz_dot setting'), graphviz_dot)
if not hasattr(self.builder, '_graphviz_warned_dot'):
self.builder._graphviz_warned_dot = {}
self.builder._graphviz_warned_dot[graphviz_dot] = True
return None, None
try:
# Graphviz may close standard input when an error occurs,
# resulting in a broken pipe on communicate()
stdout, stderr = p.communicate(code)
except (OSError, IOError) as err:
if err.errno not in (EPIPE, EINVAL):
raise
# in this case, read the standard output and standard error streams
# directly, to get the error message(s)
stdout, stderr = p.stdout.read(), p.stderr.read()
p.wait()
if p.returncode != 0:
raise GraphvizError(__('dot exited with error:\n[stderr]\n%s\n'
'[stdout]\n%s') % (stderr, stdout))
if not path.isfile(outfn):
raise GraphvizError(__('dot did not produce an output file:\n[stderr]\n%s\n'
'[stdout]\n%s') % (stderr, stdout))
return relfn, outfn
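# For reference, the dot invocation assembled above for PNG output looks
# roughly like this (paths are illustrative):
#
#   dot <graphviz_dot_args> -Tpng -o<outdir>/<imagedir>/graphviz-<sha1>.png \
#       -Tcmapx -o<outdir>/<imagedir>/graphviz-<sha1>.png.map
#
# so a single run produces both the bitmap and its client-side image map.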
def render_dot_html(self, node, code, options, prefix='graphviz',
imgcls=None, alt=None, link_to_svg=None):
# type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
format = self.builder.config.graphviz_output_format
try:
if format not in ('png', 'svg'):
raise GraphvizError(__("graphviz_output_format must be one of 'png', "
"'svg', but is %r") % format)
fname, outfn = render_dot(self, code, options, format, prefix)
except GraphvizError as exc:
logger.warning(__('dot code %r: %s'), code, text_type(exc))
raise nodes.SkipNode
if fname is None:
self.body.append(self.encode(code))
else:
if alt is None:
alt = node.get('alt', self.encode(code).strip())
imgcss = imgcls and 'class="%s"' % imgcls or ''
if 'align' in node:
self.body.append('<div align="%s" class="align-%s">' %
(node['align'], node['align']))
if format == 'svg':
self.body.append('<div class="graphviz">')
if link_to_svg is not None:
self.body.append(
'<p><a href="%s">%s</a></p>' % (fname, link_to_svg))
self.body.append('<object data="%s" type="image/svg+xml" %s>\n' %
(fname, imgcss))
self.body.append('<p class="warning">%s</p>' % alt)
self.body.append('</object></div>\n')
else:
with codecs.open(outfn + '.map', 'r', encoding='utf-8') as mapfile: # type: ignore
imgmap = ClickableMapDefinition(outfn + '.map', mapfile.read(), dot=code)
if imgmap.clickable:
# has a map
self.body.append('<div class="graphviz">')
self.body.append('<img src="%s" alt="%s" usemap="#%s" %s/>' %
(fname, alt, imgmap.id, imgcss))
self.body.append('</div>\n')
self.body.append(imgmap.generate_clickable_map())
else:
# nothing in image map
self.body.append('<div class="graphviz">')
self.body.append('<img src="%s" alt="%s" %s/>' %
(fname, alt, imgcss))
self.body.append('</div>\n')
if 'align' in node:
self.body.append('</div>\n')
raise nodes.SkipNode
def html_visit_graphviz(self, node):
# type: (nodes.NodeVisitor, graphviz) -> None
render_dot_html(self, node, node['code'], node['options'])
def render_dot_latex(self, node, code, options, prefix='graphviz'):
# type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
try:
fname, outfn = render_dot(self, code, options, 'pdf', prefix)
except GraphvizError as exc:
logger.warning(__('dot code %r: %s'), code, text_type(exc))
raise nodes.SkipNode
is_inline = self.is_inline(node)
if not is_inline:
pre = ''
post = ''
if 'align' in node:
if node['align'] == 'left':
pre = '{'
post = r'\hspace*{\fill}}'
elif node['align'] == 'right':
pre = r'{\hspace*{\fill}'
post = '}'
elif node['align'] == 'center':
pre = r'{\hfill'
post = r'\hspace*{\fill}}'
self.body.append('\n%s' % pre)
self.body.append(r'\sphinxincludegraphics[]{%s}' % fname)
if not is_inline:
self.body.append('%s\n' % post)
raise nodes.SkipNode
def latex_visit_graphviz(self, node):
# type: (nodes.NodeVisitor, graphviz) -> None
render_dot_latex(self, node, node['code'], node['options'])
def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
# type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
try:
fname, outfn = render_dot(self, code, options, 'png', prefix)
except GraphvizError as exc:
logger.warning(__('dot code %r: %s'), code, text_type(exc))
raise nodes.SkipNode
if fname is not None:
self.body.append('@image{%s,,,[graphviz],png}\n' % fname[:-4])
raise nodes.SkipNode
def texinfo_visit_graphviz(self, node):
# type: (nodes.NodeVisitor, graphviz) -> None
render_dot_texinfo(self, node, node['code'], node['options'])
def text_visit_graphviz(self, node):
# type: (nodes.NodeVisitor, graphviz) -> None
if 'alt' in node.attributes:
self.add_text(_('[graph: %s]') % node['alt'])
else:
self.add_text(_('[graph]'))
raise nodes.SkipNode
def man_visit_graphviz(self, node):
# type: (nodes.NodeVisitor, graphviz) -> None
if 'alt' in node.attributes:
self.body.append(_('[graph: %s]') % node['alt'])
else:
self.body.append(_('[graph]'))
raise nodes.SkipNode
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
app.add_node(graphviz,
html=(html_visit_graphviz, None),
latex=(latex_visit_graphviz, None),
texinfo=(texinfo_visit_graphviz, None),
text=(text_visit_graphviz, None),
man=(man_visit_graphviz, None))
app.add_directive('graphviz', Graphviz)
app.add_directive('graph', GraphvizSimple)
app.add_directive('digraph', GraphvizSimple)
app.add_config_value('graphviz_dot', 'dot', 'html')
app.add_config_value('graphviz_dot_args', [], 'html')
app.add_config_value('graphviz_output_format', 'png', 'html')
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
|
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2022 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function
import csv
import gzip
import io
import os
import re
import sqlite3
import zipfile
import zlib
from core.addr import addr_to_int
from core.addr import int_to_addr
from core.compat import xrange
from core.settings import config
from core.settings import BOGON_RANGES
from core.settings import CHECK_CONNECTION_URL
from core.settings import CDN_RANGES
from core.settings import IPCAT_SQLITE_FILE
from core.settings import IS_WIN
from core.settings import MAX_HELP_OPTION_LENGTH
from core.settings import STATIC_IPCAT_LOOKUPS
from core.settings import TIMEOUT
from core.settings import UNICODE_ENCODING
from core.settings import USER_AGENT
from core.settings import WHITELIST
from core.settings import WHITELIST_RANGES
from core.settings import WORST_ASNS
from core.trailsdict import TrailsDict
from thirdparty import six
from thirdparty.six.moves import urllib as _urllib
_ipcat_cache = {}
def retrieve_content(url, data=None, headers=None):
"""
Retrieves page content from given URL
"""
try:
req = _urllib.request.Request("".join(url[i].replace(' ', "%20") if i > url.find('?') else url[i] for i in xrange(len(url))), data, headers or {"User-agent": USER_AGENT, "Accept-encoding": "gzip, deflate"})
resp = _urllib.request.urlopen(req, timeout=TIMEOUT)
retval = resp.read()
encoding = resp.headers.get("Content-Encoding")
if encoding:
if encoding.lower() == "deflate":
data = io.BytesIO(zlib.decompress(retval, -15))
elif encoding.lower() == "gzip":
data = gzip.GzipFile("", "rb", 9, io.BytesIO(retval))
retval = data.read()
except Exception as ex:
retval = ex.read() if hasattr(ex, "read") else (get_ex_message(ex) or "")
if url.startswith("https://") and isinstance(retval, str) and "handshake failure" in retval:
return retrieve_content(url.replace("https://", "http://"), data, headers)
retval = retval or b""
if six.PY3 and isinstance(retval, bytes):
retval = retval.decode(UNICODE_ENCODING, errors="replace")
return retval
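# Minimal usage sketch (the URL is illustrative): gzip/deflate responses are
# transparently decompressed and, on a TLS handshake failure, the request is
# retried over plain HTTP, so callers simply get the page content back as a string:
#
#   content = retrieve_content("https://example.com/blocklist.txt")
#   for line in content.splitlines():
#       ...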
def ipcat_lookup(address):
if not address:
return None
if not _ipcat_cache:
for name in STATIC_IPCAT_LOOKUPS:
for value in STATIC_IPCAT_LOOKUPS[name]:
if "-" in value:
start, end = value.split('-')
start_int, end_int = addr_to_int(start), addr_to_int(end)
current = start_int
while start_int <= current <= end_int:
_ipcat_cache[int_to_addr(current)] = name
current += 1
else:
_ipcat_cache[value] = name
if address in _ipcat_cache:
retval = _ipcat_cache[address]
else:
retval = ""
if os.path.isfile(IPCAT_SQLITE_FILE):
with sqlite3.connect(IPCAT_SQLITE_FILE, isolation_level=None) as conn:
cursor = conn.cursor()
try:
_ = addr_to_int(address)
cursor.execute("SELECT name FROM ranges WHERE start_int <= ? AND end_int >= ?", (_, _))
_ = cursor.fetchone()
retval = str(_[0]) if _ else retval
except:
raise ValueError("[x] invalid IP address '%s'" % address)
_ipcat_cache[address] = retval
return retval
def worst_asns(address):
if not address:
return None
try:
_ = addr_to_int(address)
for prefix, mask, name in WORST_ASNS.get(address.split('.')[0], {}):
if _ & mask == prefix:
return name
except (IndexError, ValueError):
pass
return None
def cdn_ip(address):
if not address:
return False
try:
_ = addr_to_int(address)
for prefix, mask in CDN_RANGES.get(address.split('.')[0], {}):
if _ & mask == prefix:
return True
except (IndexError, ValueError):
pass
return False
def bogon_ip(address):
if not address:
return False
try:
_ = addr_to_int(address)
for prefix, mask in BOGON_RANGES.get(address.split('.')[0], {}):
if _ & mask == prefix:
return True
except (IndexError, ValueError):
pass
return False
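# Worked example of the prefix/mask membership test shared by cdn_ip(),
# bogon_ip() and worst_asns() above (the /24 network below is illustrative):
# a range is stored as (prefix, mask) and an address belongs to it when
# addr_to_int(address) & mask == prefix.
def _in_illustrative_range(address, network="203.0.113.0", mask=0xffffff00):
    """Return True if address falls inside the illustrative 203.0.113.0/24 block."""
    return addr_to_int(address) & mask == addr_to_int(network)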
def check_sudo():
"""
Checks for root privileges
"""
check = None
if not IS_WIN:
if getattr(os, "geteuid"):
check = os.geteuid() == 0
else:
import ctypes
check = ctypes.windll.shell32.IsUserAnAdmin()
return check
def extract_zip(filename, path=None):
_ = zipfile.ZipFile(filename, 'r')
_.extractall(path)
def get_regex(items):
head = {}
for item in sorted(items):
current = head
for char in item:
if char not in current:
current[char] = {}
current = current[char]
current[""] = {}
def process(current):
if not current:
return ""
if not any(current[_] for _ in current):
if len(current) > 1:
items = []
previous = None
start = None
for _ in sorted(current) + [six.unichr(65535)]:
if previous is not None:
if ord(_) == ord(previous) + 1:
pass
else:
if start != previous:
if start == '0' and previous == '9':
items.append(r"\d")
else:
items.append("%s-%s" % (re.escape(start), re.escape(previous)))
else:
items.append(re.escape(previous))
start = _
if start is None:
start = _
previous = _
return ("[%s]" % "".join(items)) if len(items) > 1 or '-' in items[0] else "".join(items)
else:
return re.escape(list(current.keys())[0])
else:
return ("(?:%s)" if len(current) > 1 else "%s") % ('|'.join("%s%s" % (re.escape(_), process(current[_])) for _ in sorted(current))).replace('|'.join(str(_) for _ in xrange(10)), r"\d")
regex = process(head).replace(r"(?:|\d)", r"\d?")
return regex
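# Illustrative behaviour of get_regex(): shared prefixes are collapsed via a
# character trie, e.g. get_regex(["abc", "abd", "abe"]) produces "ab(?:c|d|e)",
# and a full 0-9 alternation is folded into r"\d".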
def check_connection():
return len(retrieve_content(CHECK_CONNECTION_URL) or "") > 0
def check_whitelisted(trail):
if trail in WHITELIST:
return True
if trail and trail[0].isdigit():
try:
_ = addr_to_int(trail)
for prefix, mask in WHITELIST_RANGES:
if _ & mask == prefix:
return True
except (IndexError, ValueError):
pass
return False
def load_trails(quiet=False):
if not quiet:
print("[i] loading trails...")
retval = TrailsDict()
if os.path.isfile(config.TRAILS_FILE):
try:
with open(config.TRAILS_FILE, "r") as f:
reader = csv.reader(f, delimiter=',', quotechar='\"')
for row in reader:
if row and len(row) == 3:
trail, info, reference = row
if not check_whitelisted(trail):
retval[trail] = (info, reference)
except Exception as ex:
exit("[!] something went wrong during trails file read '%s' ('%s')" % (config.TRAILS_FILE, ex))
if not quiet:
_ = len(retval)
try:
_ = '{0:,}'.format(_)
except:
pass
print("[i] %s trails loaded" % _)
return retval
def get_text(value):
retval = value
if six.PY2:
try:
retval = str(retval)
except:
pass
else:
if isinstance(value, six.binary_type):
retval = value.decode(UNICODE_ENCODING, errors="replace")
return retval
def get_ex_message(ex):
retval = None
if getattr(ex, "message", None):
retval = ex.message
elif getattr(ex, "msg", None):
retval = ex.msg
elif getattr(ex, "args", None):
for candidate in ex.args[::-1]:
if isinstance(candidate, six.string_types):
retval = candidate
break
if retval is None:
retval = str(ex)
return retval
def is_local(address):
return re.search(r"\A(127|10|172\.[13][0-9]|192\.168)\.", address or "") is not None
def patch_parser(parser):
# Dirty hack to display longer options without breaking into two lines
if hasattr(parser, "formatter"):
def _(self, *args):
retval = parser.formatter._format_option_strings(*args)
if len(retval) > MAX_HELP_OPTION_LENGTH:
retval = ("%%.%ds.." % (MAX_HELP_OPTION_LENGTH - parser.formatter.indent_increment)) % retval
return retval
parser.formatter._format_option_strings = parser.formatter.format_option_strings
parser.formatter.format_option_strings = type(parser.formatter.format_option_strings)(_, parser)
else:
def _format_action_invocation(self, action):
retval = self.__format_action_invocation(action)
if len(retval) > MAX_HELP_OPTION_LENGTH:
retval = ("%%.%ds.." % (MAX_HELP_OPTION_LENGTH - self._indent_increment)) % retval
return retval
parser.formatter_class.__format_action_invocation = parser.formatter_class._format_action_invocation
parser.formatter_class._format_action_invocation = _format_action_invocation
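# The "%%.%ds.." trick above builds a truncating format string, e.g. with a
# budget of 8 characters ("%.8s.." % "--very-long-option") == "--very-l..",
# keeping overly long option strings on a single help line.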
|
|
import mock
import os
import subprocess
import unittest
import colorama
import testutils
from bin.commands import state
class TestStatePrintSection(unittest.TestCase):
def test_printsection_withaccent(self):
# setup
text = 'the text\nhere\n'
title = 'the title'
accent = 'the accent'
expected_output = '# ' + colorama.Fore.GREEN + title + ' ' + accent + colorama.Fore.RESET + os.linesep + text
# when
section_output = state._print_section(title, accent=accent, text=text, color='always')
# then
self.assertEqual(section_output, expected_output)
def test_printsection_withoutaccent(self):
# setup
text = 'the text\nhere\n'
title = 'the title'
expected_output = '# ' + colorama.Fore.GREEN + title + colorama.Fore.RESET + os.linesep + text
# when
section_output = state._print_section(title, accent=None, text=text, color='always')
# then
self.assertEqual(section_output, expected_output)
def test_printsection_donotshowempty_notext(self):
# when
section_output = state._print_section('title', text=None, show_empty=False, color='always')
# then
self.assertEqual(section_output, '')
def test_printsection_donotshowempty_withtext(self):
# setup
text = 'the text\nhere\n'
title = 'the title'
expected_output = '# ' + colorama.Fore.GREEN + title + colorama.Fore.RESET + os.linesep + text
# when
section_output = state._print_section(title, accent=None, text=text, show_empty=False, color='always')
# then
self.assertEqual(section_output, expected_output)
def test_printsection_showempty_notext(self):
# given
expected_output = '# ' + colorama.Fore.GREEN + 'title' + colorama.Fore.RESET + os.linesep
# when
section_output = state._print_section('title', text=None, show_empty=True, color='always')
# then
self.assertEqual(section_output, expected_output)
def test_printsection_prettyandtext(self):
# setup
text = 'the text\nhere\n'
title = 'the title'
expected_output = """# {}{}{}
the text
here
""".format(colorama.Fore.GREEN, title, colorama.Fore.RESET)
# when
section_output = state._print_section(title, text=text, format_='pretty', color='always')
# then
self.assertEqual(section_output, expected_output)
def test_printsection_prettyandnotext(self):
# setup
title = 'the title'
expected_output = "# {}{}{}".format(colorama.Fore.GREEN, title, colorama.Fore.RESET) + os.linesep + os.linesep
# when
section_output = state._print_section(title, text=None, format_='pretty', show_empty=True, color='always')
# then
self.assertEqual(section_output, expected_output)
def test_printsection_compact(self):
# setup
text = 'the text\nhere\n'
title = 'the title'
expected_output = '# ' + colorama.Fore.GREEN + title + colorama.Fore.RESET + os.linesep + text
# when
section_output = state._print_section(title, text=text, color='always')
# then
self.assertEqual(section_output, expected_output)
@mock.patch('bin.commands.utils.messages.error', side_effect=testutils.and_exit)
def test_printsection_unknownformat(self, mock_error):
# when
try:
state._print_section('title', text='text', format_='invalid', color='always')
self.fail('expected to exit but did not') # pragma: no cover
except SystemExit:
pass
# then
mock_error.assert_called_once_with("unknown format 'invalid'")
@mock.patch('sys.stdout.isatty', return_value=True)
def test_printsection_color_auto_isatty(self, mock_isatty):
# given
expected_output = '# ' + colorama.Fore.GREEN + 'title' + colorama.Fore.RESET + os.linesep
# when
section_output = state._print_section('title', text=None, show_empty=True, color='auto')
# then
self.assertEqual(section_output, expected_output)
mock_isatty.assert_called_once_with()
@mock.patch('sys.stdout.isatty', return_value=False)
def test_printsection_color_auto_isnotatty(self, mock_isatty):
# given
expected_output = '# ' + colorama.Fore.RESET + 'title' + colorama.Fore.RESET + os.linesep
# when
section_output = state._print_section('title', text=None, show_empty=True, color='auto')
# then
self.assertEqual(section_output, expected_output)
mock_isatty.assert_called_once_with()
def test_printsection_color_never(self):
# given
expected_output = '# ' + colorama.Fore.RESET + 'title' + colorama.Fore.RESET + os.linesep
# when
section_output = state._print_section('title', text=None, show_empty=True, color='never')
# then
self.assertEqual(section_output, expected_output)
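# For reference, the section layout asserted throughout this class is
# (color codes shown symbolically):
#
#   '# ' + Fore.GREEN + title [+ ' ' + accent] + Fore.RESET + os.linesep + text
#
# with Fore.RESET taking the place of Fore.GREEN when coloring is disabled.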
class TestStateState(unittest.TestCase):
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_status(
self,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': [],
'show_empty': True
}
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status title'
mock_statusaccent.return_value = 'status accent'
mock_printsection.return_value = 'status section\n'
mock_getconfigvalue.side_effect = [True, []]
mock_list.return_value = ''
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
mock_statustitle.return_value,
mock_statusaccent.return_value,
mock_statusget.return_value,
format_,
show_empty=True,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
self.assertEqual(mock_getconfigvalue.call_args_list[0][1]['as_type'].func_name, 'as_bool')
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('status section')
mock_call.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_alloff(
self,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': ['status'],
'show_empty': True
}
mock_getconfigvalue.side_effect = [True, []]
mock_list.return_value = ''
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_not_called()
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_not_called()
mock_info.assert_not_called()
mock_call.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=False)
@mock.patch('bin.commands.utils.messages.error', side_effect=testutils.and_exit)
@mock.patch('os.getcwd', return_value='/working/dir')
def test_state_notagitrepository(self, mock_getcwd, mock_error, mock_isgitrepository):
# when
try:
state.state()
self.fail('expected to exit but did not') # pragma: no cover
except SystemExit:
pass
# then
mock_isgitrepository.assert_called_once_with()
mock_error.assert_called_once_with("'/working/dir' not a git repository")
mock_getcwd.assert_called_once_with()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.resolve_coloring')
@mock.patch('colorama.init')
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_showcolor_never(
self,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_init,
mock_resolvecoloring,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'always',
'format_': format_,
'clear': False,
'ignore_extensions': [],
'show_empty': True
}
mock_resolvecoloring.return_value = 'never'
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status title'
mock_statusaccent.return_value = 'status accent'
mock_printsection.return_value = 'status section\n'
mock_getconfigvalue.side_effect = [True, []]
mock_list.return_value = ''
# when
state.state(**kwargs)
# then
mock_statusget.assert_called_once_with(
clear=False,
format_='compact',
ignore_extensions=[],
show_clean_message=True,
show_color='never',
show_empty=True
)
mock_isgitrepository.assert_called_once_with()
mock_resolvecoloring.assert_called_once_with('always')
mock_init.assert_called_once_with(strip=True)
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
mock_statustitle.return_value,
mock_statusaccent.return_value,
mock_statusget.return_value,
format_,
show_empty=True,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
self.assertEqual(mock_getconfigvalue.call_args_list[0][1]['as_type'].func_name, 'as_bool')
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('status section')
mock_call.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.resolve_coloring')
@mock.patch('colorama.init')
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_showcolor_always(
self,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_init,
mock_resolvecoloring,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'auto',
'format_': format_,
'clear': False,
'ignore_extensions': [],
'show_empty': True
}
mock_list.return_value = ''
mock_resolvecoloring.return_value = 'always'
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status title'
mock_statusaccent.return_value = 'status accent'
mock_printsection.return_value = 'status section\n'
mock_getconfigvalue.side_effect = [True, []]
# when
state.state(**kwargs)
# then
mock_statusget.assert_called_once_with(
clear=False,
format_='compact',
ignore_extensions=[],
show_clean_message=True,
show_color='always',
show_empty=True
)
mock_isgitrepository.assert_called_once_with()
mock_resolvecoloring.assert_called_once_with('auto')
mock_init.assert_called_once_with(strip=False)
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
mock_statustitle.return_value,
mock_statusaccent.return_value,
mock_statusget.return_value,
format_,
show_empty=True,
color='always'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
self.assertEqual(mock_getconfigvalue.call_args_list[0][1]['as_type'].func_name, 'as_bool')
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('status section')
mock_call.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=True)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_', return_value='')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_emptyRepository(
self,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
show_empty = True
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': [],
'show_empty': show_empty
}
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status title'
mock_statusaccent.return_value = 'status accent'
mock_printsection.return_value = 'section output\n'
mock_getconfigvalue.side_effect = [True, []]
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
mock_statustitle.return_value,
mock_statusaccent.return_value,
mock_statusget.return_value,
format_,
show_empty=True,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
mock_list.assert_not_called()
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('section output')
mock_call.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=True)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_', return_value='')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_emptyRepository_noShowStatus(
self,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
show_empty = True
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': ['status'],
'show_empty': show_empty
}
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status title'
mock_statusaccent.return_value = 'status accent'
mock_printsection.return_value = 'section output\n'
mock_getconfigvalue.side_effect = [True, []]
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_not_called()
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
mock_list.assert_not_called()
mock_checkoutput.assert_not_called()
mock_info.assert_not_called()
mock_call.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('bin.commands.utils.execute.execute')
def test_state_withextensions(
self,
mock_execute,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': ['status'],
'options': {}
}
changes_command = 'changes command'
changes_name = 'changes'
changes_output = 'the changes'
mock_getconfigvalue.side_effect = [True, True, changes_command, changes_name, [], True, []]
mock_list.return_value = 'git-state.extensions.changes'
mock_execute.return_value = [changes_output, None, 0]
mock_printsection.return_value = 'final changes output\n'
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
title=changes_name,
text=changes_output,
format_=format_,
show_empty=None,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.show', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.command'),
mock.call('git-state.extensions.changes.name', default='changes'),
mock.call('git-state.extensions.changes.options', default=[], as_type=mock.ANY),
mock.call('git-state.extensions.changes.color', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('final changes output')
mock_call.assert_not_called()
mock_execute.assert_called_once_with(['changes', 'command', '--color=never'])
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('bin.commands.utils.execute.execute')
def test_state_withextensions_doesNotSupportColor(
self,
mock_execute,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': ['status'],
'options': {}
}
changes_command = 'changes command'
changes_name = 'changes'
changes_output = 'the changes'
mock_getconfigvalue.side_effect = [True, True, changes_command, changes_name, [], False, []]
mock_list.return_value = 'git-state.extensions.changes'
mock_execute.return_value = [changes_output, None, 0]
mock_printsection.return_value = 'final changes output\n'
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
title=changes_name,
text=changes_output,
format_=format_,
show_empty=None,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.show', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.command'),
mock.call('git-state.extensions.changes.name', default='changes'),
mock.call('git-state.extensions.changes.options', default=[], as_type=mock.ANY),
mock.call('git-state.extensions.changes.color', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('final changes output')
mock_call.assert_not_called()
mock_execute.assert_called_once_with(['changes', 'command'])
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('bin.commands.utils.execute.execute')
def test_state_withextensions_withoptions_fromcommandline(
self,
mock_execute,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': ['status'],
'options': {'changes': ['--option1', '-o "1 2"']}
}
changes_command = 'changes command'
changes_name = 'changes'
changes_output = 'the changes'
mock_getconfigvalue.side_effect = [True, True, changes_command, changes_name, [], True, []]
mock_list.return_value = 'git-state.extensions.changes'
mock_execute.return_value = [changes_output, None, 0]
mock_printsection.return_value = 'final changes output\n'
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
title=changes_name,
text=changes_output,
format_=format_,
show_empty=None,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.show', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.command'),
mock.call('git-state.extensions.changes.name', default='changes'),
mock.call('git-state.extensions.changes.options', default=[], as_type=mock.ANY),
mock.call('git-state.extensions.changes.color', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('final changes output')
mock_call.assert_not_called()
mock_execute.assert_called_once_with(['changes', 'command', '--option1', '-o', '1 2', '--color=never'])
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('bin.commands.utils.execute.execute')
def test_state_withextensions_withoptions_fromconfig(
self,
mock_execute,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': ['status'],
'options': {}
}
changes_command = 'changes command'
changes_name = 'changes'
changes_output = 'the changes'
mock_getconfigvalue.side_effect = [True, True, changes_command, changes_name, ['--option1 -o "1 2"'], True, []]
mock_list.return_value = 'git-state.extensions.changes'
mock_execute.return_value = [changes_output, None, 0]
mock_printsection.return_value = 'final changes output\n'
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
title=changes_name,
text=changes_output,
format_=format_,
show_empty=None,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.show', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.command'),
mock.call('git-state.extensions.changes.name', default='changes'),
mock.call('git-state.extensions.changes.options', default=[], as_type=mock.ANY),
mock.call('git-state.extensions.changes.color', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('final changes output')
mock_call.assert_not_called()
mock_execute.assert_called_once_with(['changes', 'command', '--option1', '-o', '1 2', '--color=never'])
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('bin.commands.utils.execute.execute')
def test_state_withextensions_withoptions_fromcommandlineandconfig(
self,
mock_execute,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': ['status'],
'options': {'changes': ['--option1', '-o "1 2"']}
}
changes_command = 'changes command'
changes_name = 'changes'
changes_output = 'the changes'
mock_getconfigvalue.side_effect = [True, True, changes_command, changes_name, ['--option2 true'], True, []]
mock_list.return_value = 'git-state.extensions.changes'
mock_execute.return_value = [changes_output, None, 0]
mock_printsection.return_value = 'final changes output\n'
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
title=changes_name,
text=changes_output,
format_=format_,
show_empty=None,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.show', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.command'),
mock.call('git-state.extensions.changes.name', default='changes'),
mock.call('git-state.extensions.changes.options', default=[], as_type=mock.ANY),
mock.call('git-state.extensions.changes.color', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('final changes output')
mock_call.assert_not_called()
mock_execute.assert_called_once_with(['changes', 'command', '--option2', 'true', '--option1', '-o', '1 2', '--color=never'])
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('subprocess.Popen')
def test_state_withextensions_butignoresome(
self,
mock_popen,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': ['changes', 'status'],
'options': {}
}
mock_getconfigvalue.side_effect = [True, []]
mock_list.return_value = 'git-state.extensions.changes'
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_not_called()
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_not_called()
mock_info.assert_not_called()
mock_call.assert_not_called()
mock_popen.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('subprocess.Popen')
def test_state_withextensions_butignoresome_viaconfig(
self,
mock_popen,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': ['status'],
'options': {}
}
mock_getconfigvalue.side_effect = [True, False, []]
mock_list.return_value = 'git-state.extensions.changes'
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_not_called()
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.show', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_not_called()
mock_info.assert_not_called()
mock_call.assert_not_called()
mock_popen.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('bin.commands.utils.execute.execute')
def test_state_withextensions_ignoredViaConfig_showViaCommandLine(
self,
mock_execute,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': ['status'],
'show_extensions': ['changes'],
'options': {}
}
changes_command = 'changes command'
changes_name = 'changes'
changes_output = 'the changes'
mock_getconfigvalue.side_effect = [True, changes_command, changes_name, [], True, []]
mock_list.return_value = 'git-state.extensions.changes'
mock_execute.return_value = [changes_output, None, 0]
mock_printsection.return_value = 'final changes output\n'
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
title=changes_name,
text=changes_output,
format_=format_,
show_empty=None,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.command'),
mock.call('git-state.extensions.changes.name', default='changes'),
mock.call('git-state.extensions.changes.options', default=[], as_type=mock.ANY),
mock.call('git-state.extensions.changes.color', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('final changes output')
mock_call.assert_not_called()
mock_execute.assert_called_once_with(['changes', 'command', '--color=never'])
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('bin.commands.utils.execute.execute')
def test_state_withorder(
self,
mock_execute,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': [],
'options': {},
'show_empty': True
}
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status'
mock_statusaccent.return_value = 'status accent'
mock_printsection.side_effect = ['status section\n', 'changes section\n']
changes_command = 'changes command'
changes_name = 'changes'
changes_output = 'the changes'
mock_getconfigvalue.side_effect = [True, True, changes_command, changes_name, [], True, ['changes', 'status']]
mock_list.return_value = 'git-state.extensions.changes'
mock_execute.return_value = [changes_output, None, 0]
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_has_calls([
mock.call(
mock_statustitle.return_value,
mock_statusaccent.return_value,
mock_statusget.return_value,
format_,
show_empty=True,
color='never'
),
mock.call(
title=changes_name,
text=changes_output,
format_=format_,
show_empty=True,
color='never'
)
])
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.show', default=True, as_type=mock.ANY),
mock.call('git-state.extensions.changes.command'),
mock.call('git-state.extensions.changes.name', default='changes'),
mock.call('git-state.extensions.changes.options', default=[], as_type=mock.ANY),
mock.call('git-state.extensions.changes.color', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
self.assertEqual(mock_getconfigvalue.call_args_list[0][1]['as_type'].func_name, 'as_bool')
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('changes section\nstatus section')
mock_call.assert_not_called()
mock_execute.assert_called_once_with(['changes', 'command', '--color=never'])
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_withorder_withunknownsection(
self,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': [],
'show_empty': True
}
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status'
mock_statusaccent.return_value = 'status accent'
mock_printsection.side_effect = ['status section\n']
mock_getconfigvalue.side_effect = [True, ['status', 'unknown']]
mock_list.return_value = ''
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
mock_statustitle.return_value,
mock_statusaccent.return_value,
mock_statusget.return_value,
format_,
show_empty=True,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
self.assertEqual(mock_getconfigvalue.call_args_list[0][1]['as_type'].func_name, 'as_bool')
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('status section')
mock_call.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='1')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('bin.commands.utils.execute.pipe')
def test_state_pageOutput(
self,
mock_pipe,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': [],
'show_empty': True
}
mock_list.return_value = ''
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status title'
mock_statusaccent.return_value = 'status accent'
mock_printsection.return_value = 'status section\ntwo\nthree\nfour\nfive\n'
mock_getconfigvalue.side_effect = [True, []]
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
mock_statustitle.return_value,
mock_statusaccent.return_value,
mock_statusget.return_value,
format_,
show_empty=True,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
self.assertEqual(mock_getconfigvalue.call_args_list[0][1]['as_type'].func_name, 'as_bool')
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_not_called()
mock_pipe.assert_called_once_with(['echo', 'status section\ntwo\nthree\nfour\nfive'], ['less', '-r'])
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='1')
@mock.patch('bin.commands.utils.messages.info')
def test_state_doNotPageOutputEvenIfTooLarge(
self,
mock_info,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'clear': False,
'ignore_extensions': [],
'page': False,
'show_empty': True
}
mock_list.return_value = ''
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status title'
mock_statusaccent.return_value = 'status accent'
mock_printsection.return_value = 'status section\ntwo\nthree\nfour\nfive\n'
mock_getconfigvalue.side_effect = [True, []]
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
mock_statustitle.return_value,
mock_statusaccent.return_value,
mock_statusget.return_value,
format_,
show_empty=True,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
self.assertEqual(mock_getconfigvalue.call_args_list[0][1]['as_type'].func_name, 'as_bool')
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('status section\ntwo\nthree\nfour\nfive')
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('colorama.init')
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('sys.stdout.isatty', return_value=True)
def test_state_clear(
self,
mock_isatty,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_init,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'ignore_extensions': [],
'clear': True,
'show_empty': True
}
mock_list.return_value = ''
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status title'
mock_statusaccent.return_value = 'status accent'
mock_printsection.return_value = 'status section\n'
mock_getconfigvalue.side_effect = [True, []]
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_init.assert_called_once_with(strip=True)
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
mock_statustitle.return_value,
mock_statusaccent.return_value,
mock_statusget.return_value,
format_,
show_empty=True,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
self.assertEqual(mock_getconfigvalue.call_args_list[0][1]['as_type'].func_name, 'as_bool')
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('status section')
mock_call.assert_called_once_with('clear')
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
@mock.patch('sys.stdout.isatty', return_value=False)
def test_state_clear_notatty(
self,
mock_isatty,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'ignore_extensions': [],
'clear': True,
'show_empty': True
}
mock_list.return_value = ''
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status title'
mock_statusaccent.return_value = 'status accent'
mock_printsection.return_value = 'status section\n'
mock_getconfigvalue.side_effect = [True, []]
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
mock_statustitle.return_value,
mock_statusaccent.return_value,
mock_statusget.return_value,
format_,
show_empty=True,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
self.assertEqual(mock_getconfigvalue.call_args_list[0][1]['as_type'].func_name, 'as_bool')
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('status section')
mock_call.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.git.get_config_value', return_value=False)
@mock.patch('bin.commands.utils.git.is_empty_repository', return_value=False)
@mock.patch('bin.commands.stateextensions.status.get')
@mock.patch('bin.commands.stateextensions.status.title')
@mock.patch('bin.commands.stateextensions.status.accent')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.execute.check_output', return_value='100')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_clear_noclear(
self,
mock_info,
mock_call,
mock_checkoutput,
mock_list,
mock_printsection,
mock_statusaccent,
mock_statustitle,
mock_statusget,
mock_isemptyrepository,
mock_getconfigvalue,
mock_isgitrepository
):
# setup
format_ = 'compact'
kwargs = {
'show_color': 'never',
'format_': format_,
'ignore_extensions': [],
'clear': False,
'show_empty': True
}
mock_list.return_value = ''
mock_statusget.return_value = 'status output'
mock_statustitle.return_value = 'status title'
mock_statusaccent.return_value = 'status accent'
mock_printsection.return_value = 'status section\n'
mock_getconfigvalue.side_effect = [True, []]
# when
state.state(**kwargs)
# then
mock_isgitrepository.assert_called_once_with()
mock_isemptyrepository.assert_called_once_with()
mock_printsection.assert_called_once_with(
mock_statustitle.return_value,
mock_statusaccent.return_value,
mock_statusget.return_value,
format_,
show_empty=True,
color='never'
)
mock_getconfigvalue.assert_has_calls([
mock.call('git-state.status.show-clean-message', default=True, as_type=mock.ANY),
mock.call('git-state.order', default=[], as_type=mock.ANY)
])
self.assertEqual(mock_getconfigvalue.call_args_list[0][1]['as_type'].func_name, 'as_bool')
mock_list.assert_called_once_with(limit_to='sections')
mock_checkoutput.assert_called_once_with('tput lines'.split())
mock_info.assert_called_once_with('status section')
mock_call.assert_not_called()
class TestStateExtensionExists(unittest.TestCase):
@mock.patch('bin.commands.settings.list_')
def test_state_extensionExists(self, mock_list):
# given
mock_list.return_value = '1'
# when
exists = state._extension_exists('log')
# then
mock_list.assert_called_once_with('git-state.extensions.log', count=True)
self.assertTrue(exists)
class TestStateEditExtension(unittest.TestCase):
@mock.patch('bin.commands.state._extension_exists')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_editExtension_created(self, mock_info, mock_call, mock_extension_exists):
# given
mock_extension_exists.return_value = False
# when
state.edit_extension('log', command='git log', name='the log', options='-10', show=True, color=False)
# then
mock_extension_exists.assert_called_once_with('log')
mock_call.assert_has_calls([
mock.call(['git', 'config', '--local', 'git-state.extensions.log.command', 'git log']),
mock.call(['git', 'config', '--local', 'git-state.extensions.log.name', 'the log']),
mock.call(['git', 'config', '--local', 'git-state.extensions.log.options', '-10']),
mock.call(['git', 'config', '--local', 'git-state.extensions.log.show', 'True']),
mock.call(['git', 'config', '--local', 'git-state.extensions.log.color', 'False'])
])
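# Roughly, these `git config --local` writes persist the extension under the
# repository config; an illustrative (not asserted) layout would be:
#   [git-state "extensions.log"]
#       command = git log
#       name = the log
#       options = -10
#       show = True
#       color = False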
mock_info.assert_called_once_with('Extension log created')
@mock.patch('bin.commands.state._extension_exists')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_editExtension_edited(self, mock_info, mock_call, mock_extension_exists):
# given
mock_extension_exists.return_value = True
# when
state.edit_extension('log', command='git log', name='the log', options='-10', show=True, color=False)
# then
mock_extension_exists.assert_called_once_with('log')
mock_call.assert_has_calls([
mock.call(['git', 'config', '--local', 'git-state.extensions.log.command', 'git log']),
mock.call(['git', 'config', '--local', 'git-state.extensions.log.name', 'the log']),
mock.call(['git', 'config', '--local', 'git-state.extensions.log.options', '-10']),
mock.call(['git', 'config', '--local', 'git-state.extensions.log.show', 'True']),
mock.call(['git', 'config', '--local', 'git-state.extensions.log.color', 'False'])
])
mock_info.assert_called_once_with('Extension log updated')
@mock.patch('bin.commands.state._extension_exists')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_editExtension_onlyCommand(self, mock_info, mock_call, mock_extension_exists):
# given
mock_extension_exists.return_value = True
# when
state.edit_extension('log', command='git log', color=None)
# then
mock_extension_exists.assert_called_once_with('log')
mock_call.assert_called_once_with(['git', 'config', '--local', 'git-state.extensions.log.command', 'git log'])
mock_info.assert_called_once_with('Extension log updated')
@mock.patch('bin.commands.state._extension_exists')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_editExtension_onlyName(self, mock_info, mock_call, mock_extension_exists):
# given
mock_extension_exists.return_value = True
# when
state.edit_extension('log', name='the log', color=None)
# then
mock_extension_exists.assert_called_once_with('log')
mock_call.assert_called_once_with(['git', 'config', '--local', 'git-state.extensions.log.name', 'the log'])
mock_info.assert_called_once_with('Extension log updated')
@mock.patch('bin.commands.state._extension_exists')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_editExtension_onlyOptions(self, mock_info, mock_call, mock_extension_exists):
# given
mock_extension_exists.return_value = True
# when
state.edit_extension('log', options='-10', color=None)
# then
mock_extension_exists.assert_called_once_with('log')
mock_call.assert_called_once_with(['git', 'config', '--local', 'git-state.extensions.log.options', '-10'])
mock_info.assert_called_once_with('Extension log updated')
@mock.patch('bin.commands.state._extension_exists')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_editExtension_onlyShow(self, mock_info, mock_call, mock_extension_exists):
# given
mock_extension_exists.return_value = True
# when
state.edit_extension('log', show=False, color=None)
# then
mock_extension_exists.assert_called_once_with('log')
mock_call.assert_called_once_with(['git', 'config', '--local', 'git-state.extensions.log.show', 'False'])
mock_info.assert_called_once_with('Extension log updated')
@mock.patch('bin.commands.state._extension_exists')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_editExtension_onlyColor(self, mock_info, mock_call, mock_extension_exists):
# given
mock_extension_exists.return_value = True
# when
state.edit_extension('log', color=True)
# then
mock_extension_exists.assert_called_once_with('log')
mock_call.assert_called_once_with(['git', 'config', '--local', 'git-state.extensions.log.color', 'True'])
mock_info.assert_called_once_with('Extension log updated')
class TestStateGetExtensions(unittest.TestCase):
@mock.patch('bin.commands.settings.list_')
def test_state_getExtensions(self, mock_list):
# given
mock_list.return_value = '''git-state.extensions.log
git-state.extensions.changes'''
# when
extensions = state.get_extensions()
# then
mock_list.assert_called_once_with(limit_to='sections')
self.assertEqual(extensions, ['log', 'changes'])
@mock.patch('bin.commands.settings.list_')
def test_state_getExtensions_noExtensionsExist(self, mock_list):
# given
mock_list.return_value = ''
# when
extensions = state.get_extensions()
# then
mock_list.assert_called_once_with(limit_to='sections')
self.assertEqual(extensions, [])
class TestStatePrintExtensions(unittest.TestCase):
@mock.patch('bin.commands.state.get_extensions')
@mock.patch('bin.commands.utils.messages.info')
def test_state_printExtensions(self, mock_info, mock_get_extensions):
# given
mock_get_extensions.return_value = ['log', 'changes', 'stashes']
# when
state.print_extensions()
# then
mock_get_extensions.assert_called_once()
mock_info.assert_called_once_with('''changes
log
stashes''')
@mock.patch('bin.commands.state.get_extensions')
@mock.patch('bin.commands.utils.messages.info')
def test_state_printExtensions_noExtensionsExist(self, mock_info, mock_get_extensions):
# given
mock_get_extensions.return_value = []
# when
state.print_extensions()
# then
mock_get_extensions.assert_called_once()
mock_info.assert_not_called()
class TestStatePrintExtensionConfig(unittest.TestCase):
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.messages.info')
def test_state_printExtensionConfig(self, mock_info, mock_list):
# given
mock_list.return_value = 'config'
# when
state.print_extension_config('log')
# then
mock_list.assert_called_once_with(section='git-state.extensions.log', format_='pretty')
mock_info.assert_called_once_with('config')
@mock.patch('bin.commands.settings.list_')
@mock.patch('bin.commands.utils.messages.info')
def test_state_printExtensionConfig_extensionDoesNotExist(self, mock_info, mock_list):
# given
mock_list.return_value = ''
# when
state.print_extension_config('log')
# then
mock_list.assert_called_once_with(section='git-state.extensions.log', format_='pretty')
mock_info.assert_not_called()
class TestStateRunExtension(unittest.TestCase):
@mock.patch('colorama.init')
@mock.patch('bin.commands.state._extension_exists')
@mock.patch('bin.commands.state._run_extension')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.state._print_sections')
def test_state_runExtension(self, mock_print_sections, mock_print_section, mock_run_extension, mock_extension_exists, mock_init):
# given
mock_extension_exists.return_value = True
log_name = 'the log'
log_text = 'log text'
mock_run_extension.return_value = (log_name, log_text)
section_text = 'section text'
mock_print_section.return_value = section_text
# when
state.run_extension('log')
# then
mock_init.assert_called_once_with(strip=True)
mock_extension_exists.assert_called_once_with('log')
mock_run_extension.assert_called_once_with('log', {}, 'never')
mock_print_section.assert_called_once_with(log_name, text=log_text, show_empty=True, color='never')
mock_print_sections.assert_called_once_with({log_name: section_text})
@mock.patch('colorama.init')
@mock.patch('bin.commands.state._extension_exists')
@mock.patch('bin.commands.state._run_extension')
@mock.patch('bin.commands.state._print_section')
@mock.patch('bin.commands.state._print_sections')
def test_state_runExtension_extensionDoesNotExist(self, mock_print_sections, mock_print_section, mock_run_extension, mock_extension_exists, mock_init):
# given
mock_extension_exists.return_value = False
# when
state.run_extension('log')
# then
mock_init.assert_called_once_with(strip=True)
mock_extension_exists.assert_called_once_with('log')
mock_run_extension.assert_not_called()
mock_print_section.assert_not_called()
mock_print_sections.assert_not_called()
class TestStateDeleteExtension(unittest.TestCase):
@mock.patch('bin.commands.state._extension_exists')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_deleteExtension(self, mock_info, mock_call, mock_extension_exists):
# given
mock_extension_exists.return_value = True
# when
state.delete_extension('log')
# then
mock_extension_exists.assert_called_once_with('log')
mock_call.assert_called_once_with('git config --local --remove-section git-state.extensions.log'.split())
mock_info.assert_called_once_with('Extension log deleted')
@mock.patch('bin.commands.state._extension_exists')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_state_deleteExtension_extensionDoesNotExist(self, mock_info, mock_call, mock_extension_exists):
# given
mock_extension_exists.return_value = False
# when
state.delete_extension('log')
# then
mock_extension_exists.assert_called_once_with('log')
mock_call.assert_not_called()
mock_info.assert_not_called()
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for huawei 18000 storage."""
import json
import mock
import os
import shutil
import tempfile
import time
from xml.dom import minidom
from oslo_log import log as logging
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import huawei_driver
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import rest_client
LOG = logging.getLogger(__name__)
test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu@huawei#OpenStack_Pool',
'provider_location': '11',
}
error_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0637',
'size': 2,
'volume_name': 'vol2',
'id': '21ec7341-9256-497b-97d9-ef48edcf0637',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0637',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol2',
'display_description': 'test error_volume',
'volume_type_id': None,
'host': 'ubuntu@huawei#OpenStack_Pool_error',
'provider_location': '12',
}
test_snap = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 1,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'provider_location': '11',
}
FakeConnector = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3',
'wwpns': ['10000090fa0d6754'],
'wwnns': ['10000090fa0d6755'],
'host': 'ubuntuc',
}
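# FakeConnector carries both an iSCSI IQN and FC WWPNs/WWNNs, so the same
# connector dict can be used with the iSCSI and FC fake drivers below.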
# A fake response for a successful storage operation
FAKE_COMMON_SUCCESS_RESPONSE = """
{
"error": {
"code": 0
}
}
"""
# A fake response for logging in to Huawei storage
FAKE_GET_LOGIN_STORAGE_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"username": "admin",
"iBaseToken": "2001031430",
"deviceid": "210235G7J20000000000"
}
}
"""
# A fake response for logging out of Huawei storage
FAKE_LOGIN_OUT_STORAGE_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": 11
}
}
"""
# A fake response for storage pool info
FAKE_STORAGE_POOL_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"USERFREECAPACITY": "985661440",
"ID": "0",
"NAME": "OpenStack_Pool",
"USERTOTALCAPACITY": "985661440"
}]
}
"""
# A fake response for LUN and LUN group operations
FAKE_LUN_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": "1",
"NAME": "5mFHcBv4RkCcD+JyrWc0SA"
}
}
"""
FAKE_LUN_DELETE_SUCCESS_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": "11",
"IOCLASSID": "11",
"NAME": "5mFHcBv4RkCcD+JyrWc0SA",
"RUNNINGSTATUS": "2",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "27"
}
}
"""
FAKE_QUERY_ALL_LUN_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"ID": "1",
"NAME": "IexzQZJWSXuX2e9I7c8GNQ"
}]
}
"""
FAKE_LUN_ASSOCIATE_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"ID":"11"
}]
}
"""
FAKE_QUERY_LUN_GROUP_INFO_RESPONSE = """
{
"error": {
"code":0
},
"data":[{
"NAME":"OpenStack_LunGroup_1",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}]
}
"""
FAKE_QUERY_LUN_GROUP_RESPONSE = """
{
"error": {
"code":0
},
"data":{
"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}
}
"""
FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}
}
"""
FAKE_LUN_COUNT_RESPONSE = """
{
"data":{
"COUNT":"7"
},
"error":{
"code":0,
"description":"0"
}
}
"""
# A fake response for listing snapshots
FAKE_SNAPSHOT_LIST_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"ID": 11,
"NAME": "wr_LMKAjS7O_VtsEIREGYw"
},
{
"ID": 12,
"NAME": "SDFAJSDFLKJ"
}]
}
"""
# A fake response for creating a snapshot
FAKE_CREATE_SNAPSHOT_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": 11,
"NAME": "YheUoRwbSX2BxN7"
}
}
"""
# A fake response for getting a snapshot
FAKE_GET_SNAPSHOT_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": 11,
"NAME": "YheUoRwbSX2BxN7"
}
}
"""
# A fake response for getting iSCSI target port info
FAKE_GET_ISCSI_INFO_RESPONSE = """
{
"data": [{
"ETHPORTID": "139267",
"ID": "iqn.oceanstor:21004846fb8ca15f::22003:111.111.101.244",
"TPGT": "8196",
"TYPE": 249
},
{
"ETHPORTID": "139268",
"ID": "iqn.oceanstor:21004846fb8ca15f::22003:111.111.102.244",
"TPGT": "8196",
"TYPE": 249
}
],
"error": {
"code": 0,
"description": "0"
}
}
"""
# A fake response for getting Ethernet port info
FAKE_GET_ETH_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"PARENTTYPE": 209,
"MACADDRESS": "00:22:a1:0a:79:57",
"ETHNEGOTIATE": "-1",
"ERRORPACKETS": "0",
"IPV4ADDR": "198.100.10.1",
"IPV6GATEWAY": "",
"IPV6MASK": "0",
"OVERFLOWEDPACKETS": "0",
"ISCSINAME": "P0",
"HEALTHSTATUS": "1",
"ETHDUPLEX": "2",
"ID": "16909568",
"LOSTPACKETS": "0",
"TYPE": 213,
"NAME": "P0",
"INIORTGT": "4",
"RUNNINGSTATUS": "10",
"IPV4GATEWAY": "",
"BONDNAME": "",
"STARTTIME": "1371684218",
"SPEED": "1000",
"ISCSITCPPORT": "0",
"IPV4MASK": "255.255.0.0",
"IPV6ADDR": "",
"LOGICTYPE": "0",
"LOCATION": "ENG0.A5.P0",
"MTU": "1500",
"PARENTID": "1.5"
},
{
"PARENTTYPE": 209,
"MACADDRESS": "00:22:a1:0a:79:57",
"ETHNEGOTIATE": "-1",
"ERRORPACKETS": "0",
"IPV4ADDR": "198.100.10.2",
"IPV6GATEWAY": "",
"IPV6MASK": "0",
"OVERFLOWEDPACKETS": "0",
"ISCSINAME": "P0",
"HEALTHSTATUS": "1",
"ETHDUPLEX": "2",
"ID": "16909568",
"LOSTPACKETS": "0",
"TYPE": 213,
"NAME": "P0",
"INIORTGT": "4",
"RUNNINGSTATUS": "10",
"IPV4GATEWAY": "",
"BONDNAME": "",
"STARTTIME": "1371684218",
"SPEED": "1000",
"ISCSITCPPORT": "0",
"IPV4MASK": "255.255.0.0",
"IPV6ADDR": "",
"LOGICTYPE": "0",
"LOCATION": "ENG0.A5.P3",
"MTU": "1500",
"PARENTID": "1.5"
}]
}
"""
FAKE_GET_ETH_ASSOCIATE_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"IPV4ADDR": "198.100.10.1",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "10"
},
{
"IPV4ADDR": "198.100.10.2",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "10"
}
]
}
"""
# A fake response for getting iSCSI device info
FAKE_GET_ISCSI_DEVICE_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"CMO_ISCSI_DEVICE_NAME": "iqn.2006-08.com.huawei:oceanstor:21000022a:"
}]
}
"""
# A fake response for getting all host info
FAKE_GET_ALL_HOST_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"PARENTTYPE": 245,
"NAME": "ubuntuc",
"DESCRIPTION": "",
"RUNNINGSTATUS": "1",
"IP": "",
"PARENTNAME": "",
"OPERATIONSYSTEM": "0",
"LOCATION": "",
"HEALTHSTATUS": "1",
"MODEL": "",
"ID": "1",
"PARENTID": "",
"NETWORKNAME": "",
"TYPE": 21
},
{
"PARENTTYPE": 245,
"NAME": "ubuntu",
"DESCRIPTION": "",
"RUNNINGSTATUS": "1",
"IP": "",
"PARENTNAME": "",
"OPERATIONSYSTEM": "0",
"LOCATION": "",
"HEALTHSTATUS": "1",
"MODEL": "",
"ID": "2",
"PARENTID": "",
"NETWORKNAME": "",
"TYPE": 21
}]
}
"""
# A fake response for getting host group info
FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"NAME":"OpenStack_HostGroup_1",
"DESCRIPTION":"",
"ID":"0",
"TYPE":14
}]
}
"""
FAKE_GET_HOST_GROUP_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data":{
"NAME":"ubuntuc",
"DESCRIPTION":"",
"ID":"0",
"TYPE":14
}
}
"""
# A fake response for LUN copy info
FAKE_GET_LUN_COPY_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"COPYSTOPTIME": "-1",
"HEALTHSTATUS": "1",
"NAME": "w1PSNvu6RumcZMmSh4/l+Q==",
"RUNNINGSTATUS": "36",
"DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==",
"ID": "0",
"LUNCOPYTYPE": "1",
"COPYPROGRESS": "0",
"COPYSPEED": "2",
"TYPE": 219,
"COPYSTARTTIME": "-1"
}
}
"""
# A fake response for listing LUN copies
FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"COPYSTOPTIME": "1372209335",
"HEALTHSTATUS": "1",
"NAME": "w1PSNvu6RumcZMmSh4/l+Q==",
"RUNNINGSTATUS": "40",
"DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==",
"ID": "0",
"LUNCOPYTYPE": "1",
"COPYPROGRESS": "100",
"COPYSPEED": "2",
"TYPE": 219,
"COPYSTARTTIME": "1372209329"
}]
}
"""
# A fake response for mapping view info
FAKE_GET_MAPPING_VIEW_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"IexzQZJWSXuX2e9I7c8GNQ",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"1",
"INBANDLUNWWN":"",
"TYPE":245
},
{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"YheUoRwbSX2BxN767nvLSw",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"2",
"INBANDLUNWWN":"",
"TYPE":245
}]
}
"""
FAKE_GET_MAPPING_VIEW_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"mOWtSXnaQKi3hpB3tdFRIQ",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"1",
"INBANDLUNWWN":"",
"TYPE":245
}
}
"""
FAKE_FC_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"HEALTHSTATUS":"1",
"NAME":"",
"MULTIPATHTYPE":"1",
"ISFREE":"true",
"RUNNINGSTATUS":"27",
"ID":"10000090fa0d6754",
"OPERATIONSYSTEM":"255",
"TYPE":223
},
{
"HEALTHSTATUS":"1",
"NAME":"",
"MULTIPATHTYPE":"1",
"ISFREE":"true",
"RUNNINGSTATUS":"27",
"ID":"10000090fa0d6755",
"OPERATIONSYSTEM":"255",
"TYPE":223
}]
}
"""
FAKE_ISCSI_INITIATOR_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"CHAPNAME":"mm-user",
"HEALTHSTATUS":"1",
"ID":"iqn.1993-08.org.debian:01:9073aba6c6f",
"ISFREE":"true",
"MULTIPATHTYPE":"1",
"NAME":"",
"OPERATIONSYSTEM":"255",
"RUNNINGSTATUS":"28",
"TYPE":222,
"USECHAP":"true"
}]
}
"""
FAKE_HOST_LINK_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"PARENTTYPE":21,
"TARGET_ID":"0000000000000000",
"INITIATOR_NODE_WWN":"20000090fa0d6754",
"INITIATOR_TYPE":"223",
"RUNNINGSTATUS":"27",
"PARENTNAME":"ubuntuc",
"INITIATOR_ID":"10000090fa0d6754",
"TARGET_PORT_WWN":"24000022a10a2a39",
"HEALTHSTATUS":"1",
"INITIATOR_PORT_WWN":"10000090fa0d6754",
"ID":"010000090fa0d675-0000000000110400",
"TARGET_NODE_WWN":"21000022a10a2a39",
"PARENTID":"1",
"CTRL_ID":"0",
"TYPE":255,
"TARGET_TYPE":"212"
}]
}
"""
FAKE_PORT_GROUP_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"ID":11,
"NAME":"portgroup-test"
}]
}
"""
FAKE_ERROR_INFO_RESPONSE = """
{
"error":{
"code":31755596
}
}
"""
FAKE_ERROR_LUN_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"ID":"11",
"IOCLASSID":"11",
"NAME":"5mFHcBv4RkCcD+JyrWc0SA"
}
}
"""
# mock login info map
MAP_COMMAND_TO_FAKE_RESPONSE = {}
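# Keys are REST URLs with the endpoint prefix and the device id path stripped
# by Fake18000Client.call(); when an HTTP method is passed it is appended as
# '/<METHOD>' (e.g. 'lun/11/DELETE'). Values are the canned JSON bodies above.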
MAP_COMMAND_TO_FAKE_RESPONSE['/xx/sessions'] = (
FAKE_GET_LOGIN_STORAGE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['sessions'] = (
FAKE_LOGIN_OUT_STORAGE_RESPONSE)
# mock storage info map
MAP_COMMAND_TO_FAKE_RESPONSE['storagepool'] = (
FAKE_STORAGE_POOL_RESPONSE)
# mock lun info map
MAP_COMMAND_TO_FAKE_RESPONSE['lun'] = (
FAKE_LUN_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/11/GET'] = (
FAKE_LUN_DELETE_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/1/GET'] = (
FAKE_LUN_DELETE_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun?range=[0-65535]/GET'] = (
FAKE_QUERY_ALL_LUN_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256'
'&ASSOCIATEOBJID=11/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256'
'&ASSOCIATEOBJID=12/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/associate?ID=1&TYPE=11&ASSOCIATEOBJTYPE=21'
'&ASSOCIATEOBJID=0/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21'
'&ASSOCIATEOBJID=1/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup?range=[0-8191]/GET'] = (
FAKE_QUERY_LUN_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup'] = (
FAKE_QUERY_LUN_GROUP_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate'] = (
FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=1/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=11/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/count?TYPE=11&ASSOCIATEOBJTYPE=256'
'&ASSOCIATEOBJID=11/GET'] = (
FAKE_LUN_COUNT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/expand/PUT'] = (
FAKE_LUN_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?ID=12&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=12/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock snapshot info map
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot'] = (
FAKE_CREATE_SNAPSHOT_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot/11/GET'] = (
FAKE_GET_SNAPSHOT_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot/activate'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot/stop/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot?range=[0-32767]/GET'] = (
FAKE_SNAPSHOT_LIST_INFO_RESPONSE)
# mock QoS info map
MAP_COMMAND_TO_FAKE_RESPONSE['ioclass/11/GET'] = (
FAKE_LUN_DELETE_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['ioclass/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['ioclass/active/11/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock iscsi info map
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_tgt_port/GET'] = (
FAKE_GET_ISCSI_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['eth_port/GET'] = (
FAKE_GET_ETH_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['eth_port/associate?TYPE=213&ASSOCIATEOBJTYPE=257'
'&ASSOCIATEOBJID=11/GET'] = (
FAKE_GET_ETH_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsidevicename'] = (
FAKE_GET_ISCSI_DEVICE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_initiator?range=[0-256]/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_initiator/'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_initiator/POST'] = (
FAKE_ISCSI_INITIATOR_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_initiator/PUT'] = (
FAKE_ISCSI_INITIATOR_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_initiator/'
'iqn.1993-08.debian:01:ec2bff7ac3a3/PUT'] = (
FAKE_ISCSI_INITIATOR_RESPONSE)
# mock host info map
MAP_COMMAND_TO_FAKE_RESPONSE['host?range=[0-65535]/GET'] = (
FAKE_GET_ALL_HOST_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['hostgroup?range=[0-8191]/GET'] = (
FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['hostgroup'] = (
FAKE_GET_HOST_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['host/associate?TYPE=21&ASSOCIATEOBJTYPE=14'
'&ASSOCIATEOBJID=0/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['hostgroup/associate'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock copy info map
MAP_COMMAND_TO_FAKE_RESPONSE['luncopy'] = (
FAKE_GET_LUN_COPY_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['LUNCOPY?range=[0-1023]/GET'] = (
FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['LUNCOPY/start/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['LUNCOPY/0/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock mapping view info map
MAP_COMMAND_TO_FAKE_RESPONSE['mappingview?range=[0-8191]/GET'] = (
FAKE_GET_MAPPING_VIEW_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['mappingview'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['MAPPINGVIEW/CREATE_ASSOCIATE/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock FC info map
MAP_COMMAND_TO_FAKE_RESPONSE['fc_initiator?ISFREE=true&range=[0-8191]/GET'] = (
FAKE_FC_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['fc_initiator/10000090fa0d6754/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['host_link?INITIATOR_TYPE=223'
'&INITIATOR_PORT_WWN=10000090fa0d6754/GET'] = (
FAKE_HOST_LINK_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['portgroup?range=[0-8191]&TYPE=257/GET'] = (
FAKE_PORT_GROUP_RESPONSE)
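# Stubbed in place of time.sleep (see setUp) so driver wait/retry loops do not
# slow the tests down.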
def Fake_sleep(time):
pass
class Fake18000Client(rest_client.RestClient):
def __init__(self, configuration):
rest_client.RestClient.__init__(self, configuration)
self.delete_flag = False
self.terminateFlag = False
self.deviceid = None
self.test_fail = False
self.checkFlag = False
self.remove_chap_flag = False
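# The flags above are simple bookkeeping used by the fake client and the
# tests to record side effects; test_fail makes the next call() return an
# error body instead of the mapped response.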
def _change_file_mode(self, filepath):
pass
def _parse_volume_type(self, volume):
poolinfo = self._find_pool_info()
volume_size = self._get_volume_size(poolinfo, volume)
params = {'LUNType': 0,
'WriteType': '1',
'PrefetchType': '3',
'qos_level': 'Qos-high',
'StripUnitSize': '64',
'PrefetchValue': '0',
'PrefetchTimes': '0',
'qos': 'OpenStack_Qos_High',
'MirrorSwitch': '1',
'tier': 'Tier_high',
}
params['volume_size'] = volume_size
params['pool_id'] = poolinfo['ID']
return params
def _get_snapshotid_by_name(self, snapshot_name):
return "11"
def _check_snapshot_exist(self, snapshot_id):
return True
def call(self, url=False, data=None, method=None):
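# Normalize the URL into a bare command (strip the endpoint and device id,
# append the HTTP method when given), look it up in
# MAP_COMMAND_TO_FAKE_RESPONSE, and honour the one-shot test_fail injection.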
url = url.replace('http://100.115.10.69:8082/deviceManager/rest', '')
command = url.replace('/210235G7J20000000000/', '')
data = None
if method:
command = command + "/" + method
for item in MAP_COMMAND_TO_FAKE_RESPONSE.keys():
if command == item:
data = MAP_COMMAND_TO_FAKE_RESPONSE[item]
if self.test_fail:
data = FAKE_ERROR_INFO_RESPONSE
if command == 'lun/11/GET':
data = FAKE_ERROR_LUN_INFO_RESPONSE
self.test_fail = False
return json.loads(data)
class Fake18000ISCSIStorage(huawei_driver.Huawei18000ISCSIDriver):
"""Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver."""
def __init__(self, configuration):
self.configuration = configuration
self.xml_file_path = self.configuration.cinder_huawei_conf_file
def do_setup(self):
self.restclient = Fake18000Client(configuration=self.configuration)
class Fake18000FCStorage(huawei_driver.Huawei18000FCDriver):
"""Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver."""
def __init__(self, configuration):
self.configuration = configuration
self.xml_file_path = self.configuration.cinder_huawei_conf_file
def do_setup(self):
self.restclient = Fake18000Client(configuration=self.configuration)
class Huawei18000ISCSIDriverTestCase(test.TestCase):
def setUp(self):
super(Huawei18000ISCSIDriverTestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
self.addCleanup(shutil.rmtree, self.tmp_dir)
self.create_fake_conf_file()
self.addCleanup(os.remove, self.fake_conf_file)
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.cinder_huawei_conf_file = self.fake_conf_file
self.xml_file_path = self.configuration.cinder_huawei_conf_file
self.stubs.Set(time, 'sleep', Fake_sleep)
driver = Fake18000ISCSIStorage(configuration=self.configuration)
self.driver = driver
self.driver.do_setup()
self.portgroup = 'portgroup-test'
self.iscsi_iqns = ['iqn.2006-08.com.huawei:oceanstor:21000022a:'
':20500:198.100.10.1',
'iqn.2006-08.com.huawei:oceanstor:21000022a:'
':20503:198.100.10.2']
self.target_ips = ['198.100.10.1',
'198.100.10.2']
self.portgroup_id = 11
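# These expected values mirror the canned responses: the target IPs come from
# FAKE_GET_ETH_INFO_RESPONSE and the portgroup id from FAKE_PORT_GROUP_RESPONSE.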
def test_login_success(self):
deviceid = self.driver.restclient.login()
self.assertEqual('210235G7J20000000000', deviceid)
def test_create_volume_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_delete_volume_success(self):
self.driver.restclient.login()
delete_flag = self.driver.delete_volume(test_volume)
self.assertTrue(delete_flag)
def test_create_snapshot_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_snapshot(test_volume)
self.assertEqual(11, lun_info['provider_location'])
def test_delete_snapshot_success(self):
self.driver.restclient.login()
delete_flag = self.driver.delete_snapshot(test_snap)
self.assertTrue(delete_flag)
def test_create_volume_from_snapshot_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_volume_from_snapshot(test_volume,
test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_initialize_connection_success(self):
self.driver.restclient.login()
iscsi_properties = self.driver.initialize_connection(test_volume,
FakeConnector)
self.assertEqual(1, iscsi_properties['data']['target_lun'])
def test_terminate_connection_success(self):
self.driver.restclient.login()
self.driver.restclient.terminateFlag = True
self.driver.terminate_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.restclient.terminateFlag)
def test_get_volume_status(self):
self.driver.restclient.login()
data = self.driver.get_volume_stats()
self.assertEqual('1.1.1', data['driver_version'])
def test_extend_volume(self):
self.driver.restclient.login()
lun_info = self.driver.extend_volume(test_volume, 3)
self.assertEqual('1', lun_info['provider_location'])
def test_login_fail(self):
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.restclient.login)
def test_create_snapshot_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, test_volume)
def test_create_volume_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, test_volume)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, error_volume)
def test_delete_volume_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
delete_flag = self.driver.delete_volume(test_volume)
self.assertTrue(delete_flag)
def test_delete_snapshot_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
        delete_flag = self.driver.delete_snapshot(test_snap)
self.assertTrue(delete_flag)
def test_initialize_connection_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
test_volume, FakeConnector)
def test_get_default_timeout(self):
result = huawei_utils.get_default_timeout(self.xml_file_path)
self.assertEqual('43200', result)
def test_get_wait_interval(self):
result = huawei_utils.get_wait_interval(self.xml_file_path,
'LUNReadyWaitInterval')
self.assertEqual(2, result)
def test_lun_is_associated_to_lungroup(self):
self.driver.restclient.login()
self.driver.restclient.associate_lun_to_lungroup('11', '11')
result = self.driver.restclient._is_lun_associated_to_lungroup('11',
'11')
self.assertTrue(result)
def test_lun_is_not_associated_to_lun_group(self):
self.driver.restclient.login()
self.driver.restclient.associate_lun_to_lungroup('12', '12')
self.driver.restclient.remove_lun_from_lungroup('12', '12')
result = self.driver.restclient._is_lun_associated_to_lungroup('12',
'12')
self.assertFalse(result)
def test_get_tgtip(self):
self.driver.restclient.login()
portg_id = self.driver.restclient.find_tgt_port_group(self.portgroup)
result = self.driver.restclient._get_tgt_ip_from_portgroup(portg_id)
self.assertEqual(self.target_ips, result)
def test_get_iscsi_params(self):
self.driver.restclient.login()
(iscsi_iqns, target_ips, portgroup_id) = (
self.driver.restclient.get_iscsi_params(self.xml_file_path,
FakeConnector))
self.assertEqual(self.iscsi_iqns, iscsi_iqns)
self.assertEqual(self.target_ips, target_ips)
self.assertEqual(self.portgroup_id, portgroup_id)
def test_get_lun_conf_params(self):
self.driver.restclient.login()
luninfo = huawei_utils.get_lun_conf_params(self.xml_file_path)
luninfo['pool_id'] = '0'
luninfo['volume_size'] = 2
luninfo['volume_description'] = 'test volume'
luninfo = huawei_utils.init_lun_parameters('5mFHcBv4RkCcD+JyrWc0SA',
luninfo)
self.assertEqual('5mFHcBv4RkCcD+JyrWc0SA', luninfo['NAME'])
    def test_get_iscsi_conf(self):
self.driver.restclient.login()
iscsiinfo = huawei_utils.get_iscsi_conf(self.xml_file_path)
self.assertEqual('iqn.1993-08.debian:01:ec2bff7ac3a3',
iscsiinfo['Initiator'])
def test_check_conf_file(self):
self.driver.restclient.login()
self.driver.restclient.checkFlag = True
huawei_utils.check_conf_file(self.xml_file_path)
self.assertTrue(self.driver.restclient.checkFlag)
def test_get_conf_host_os_type(self):
self.driver.restclient.login()
host_os = huawei_utils.get_conf_host_os_type('100.97.10.30',
self.configuration)
self.assertEqual('0', host_os)
def test_find_chap_info(self):
self.driver.restclient.login()
tmp_dict = {}
iscsi_info = {}
tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3'
tmp_dict['CHAPinfo'] = 'mm-user;mm-user@storage'
ini_list = [tmp_dict]
iscsi_info['Initiator'] = ini_list
initiator_name = FakeConnector['initiator']
chapinfo = self.driver.restclient.find_chap_info(iscsi_info,
initiator_name)
chap_username, chap_password = chapinfo.split(';')
self.assertEqual('mm-user', chap_username)
self.assertEqual('mm-user@storage', chap_password)
def test_find_alua_info(self):
self.driver.restclient.login()
tmp_dict = {}
iscsi_info = {}
tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3'
tmp_dict['ALUA'] = '1'
ini_list = [tmp_dict]
iscsi_info['Initiator'] = ini_list
initiator_name = FakeConnector['initiator']
type = self.driver.restclient._find_alua_info(iscsi_info,
initiator_name)
self.assertEqual('1', type)
def test_find_pool_info(self):
self.driver.restclient.login()
pools = {
"error": {"code": 0},
"data": [{
"NAME": "test001",
"ID": "0",
"USERFREECAPACITY": "36",
"USERTOTALCAPACITY": "48",
"USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE},
{"NAME": "test002",
"ID": "1",
"USERFREECAPACITY": "37",
"USERTOTALCAPACITY": "49",
"USAGETYPE": constants.FILE_SYSTEM_POOL_TYPE}]}
pool_name = 'test001'
test_info = {'CAPACITY': '36', 'ID': '0', 'TOTALCAPACITY': '48'}
pool_info = self.driver.restclient.find_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)
pool_name = 'test002'
test_info = {}
pool_info = self.driver.restclient.find_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)
pool_name = 'test000'
test_info = {}
pool_info = self.driver.restclient.find_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)
def create_fake_conf_file(self):
"""Create a fake Config file.
Huawei storage customize a XML configuration file, the configuration
file is used to set the Huawei storage custom parameters, therefore,
in the UT test we need to simulate such a configuration file.
"""
doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
storage = doc.createElement('Storage')
config.appendChild(storage)
controllerip0 = doc.createElement('ControllerIP0')
controllerip0_text = doc.createTextNode('10.10.10.1')
controllerip0.appendChild(controllerip0_text)
storage.appendChild(controllerip0)
controllerip1 = doc.createElement('ControllerIP1')
controllerip1_text = doc.createTextNode('10.10.10.2')
controllerip1.appendChild(controllerip1_text)
storage.appendChild(controllerip1)
username = doc.createElement('UserName')
username_text = doc.createTextNode('admin')
username.appendChild(username_text)
storage.appendChild(username)
userpassword = doc.createElement('UserPassword')
userpassword_text = doc.createTextNode('Admin@storage')
userpassword.appendChild(userpassword_text)
storage.appendChild(userpassword)
url = doc.createElement('RestURL')
url_text = doc.createTextNode('http://100.115.10.69:8082/'
'deviceManager/rest/')
url.appendChild(url_text)
storage.appendChild(url)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool')
storagepool.appendChild(pool_text)
storage.appendChild(storagepool)
lun = doc.createElement('LUN')
config.appendChild(lun)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool')
storagepool.appendChild(pool_text)
lun.appendChild(storagepool)
timeout = doc.createElement('Timeout')
timeout_text = doc.createTextNode('43200')
timeout.appendChild(timeout_text)
lun.appendChild(timeout)
lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval')
lun_ready_wait_interval_text = doc.createTextNode('2')
lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text)
lun.appendChild(lun_ready_wait_interval)
prefetch = doc.createElement('Prefetch')
prefetch.setAttribute('Type', '1')
prefetch.setAttribute('Value', '0')
lun.appendChild(prefetch)
iscsi = doc.createElement('iSCSI')
config.appendChild(iscsi)
defaulttargetip = doc.createElement('DefaultTargetIP')
defaulttargetip_text = doc.createTextNode('100.115.10.68')
defaulttargetip.appendChild(defaulttargetip_text)
iscsi.appendChild(defaulttargetip)
initiator = doc.createElement('Initiator')
initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
initiator.setAttribute('TargetIP', '192.168.100.2')
initiator.setAttribute('CHAPinfo', 'mm-user;mm-user@storage')
initiator.setAttribute('ALUA', '1')
initiator.setAttribute('TargetPortGroup', 'portgroup-test')
iscsi.appendChild(initiator)
host = doc.createElement('Host')
host.setAttribute('HostIP', '100.97.10.30')
host.setAttribute('OSType', 'Linux')
config.appendChild(host)
fakefile = open(self.fake_conf_file, 'w')
fakefile.write(doc.toprettyxml(indent=''))
fakefile.close()
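# For reference, a rough sketch (not exact output) of the XML document that
# create_fake_conf_file() above writes; the element names and values come from
# the fake data built in that method:
#
#     <config>
#       <Storage>ControllerIP0/1, UserName, UserPassword, RestURL, StoragePool</Storage>
#       <LUN>StoragePool, Timeout=43200, LUNReadyWaitInterval=2, Prefetch</LUN>
#       <iSCSI>DefaultTargetIP, Initiator (CHAP/ALUA/TargetPortGroup attributes)</iSCSI>
#       <Host HostIP="100.97.10.30" OSType="Linux"/>
#     </config>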
class Huawei18000FCDriverTestCase(test.TestCase):
def setUp(self):
super(Huawei18000FCDriverTestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
self.addCleanup(shutil.rmtree, self.tmp_dir)
self.create_fake_conf_file()
self.addCleanup(os.remove, self.fake_conf_file)
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.cinder_huawei_conf_file = self.fake_conf_file
self.xml_file_path = self.configuration.cinder_huawei_conf_file
self.stubs.Set(time, 'sleep', Fake_sleep)
driver = Fake18000FCStorage(configuration=self.configuration)
self.driver = driver
self.driver.do_setup()
def test_login_success(self):
deviceid = self.driver.restclient.login()
self.assertEqual('210235G7J20000000000', deviceid)
def test_create_volume_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_delete_volume_success(self):
self.driver.restclient.login()
delete_flag = self.driver.delete_volume(test_volume)
self.assertTrue(delete_flag)
def test_create_snapshot_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_snapshot(test_volume)
self.assertEqual(11, lun_info['provider_location'])
def test_delete_snapshot_success(self):
self.driver.restclient.login()
delete_flag = self.driver.delete_snapshot(test_snap)
self.assertTrue(delete_flag)
    def test_create_volume_from_snap_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_volume_from_snapshot(test_volume,
test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_initialize_connection_success(self):
self.driver.restclient.login()
iscsi_properties = self.driver.initialize_connection(test_volume,
FakeConnector)
self.assertEqual(1, iscsi_properties['data']['target_lun'])
def test_terminate_connection_success(self):
self.driver.restclient.login()
self.driver.restclient.terminateFlag = True
self.driver.terminate_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.restclient.terminateFlag)
def test_get_volume_status(self):
self.driver.restclient.login()
data = self.driver.get_volume_stats()
self.assertEqual('1.1.1', data['driver_version'])
def test_extend_volume(self):
self.driver.restclient.login()
lun_info = self.driver.extend_volume(test_volume, 3)
self.assertEqual('1', lun_info['provider_location'])
def test_login_fail(self):
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.restclient.login)
def test_create_snapshot_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, test_volume)
def test_create_volume_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, test_volume)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, error_volume)
def test_delete_volume_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
delete_flag = self.driver.delete_volume(test_volume)
self.assertTrue(delete_flag)
def test_delete_snapshot_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
delete_flag = self.driver.delete_snapshot(test_snap)
self.assertTrue(delete_flag)
def test_initialize_connection_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
test_volume, FakeConnector)
def test_get_default_timeout(self):
result = huawei_utils.get_default_timeout(self.xml_file_path)
self.assertEqual('43200', result)
def test_get_wait_interval(self):
result = huawei_utils.get_wait_interval(self.xml_file_path,
'LUNReadyWaitInterval')
self.assertEqual(2, result)
def test_lun_is_associated_to_lungroup(self):
self.driver.restclient.login()
self.driver.restclient.associate_lun_to_lungroup('11', '11')
result = self.driver.restclient._is_lun_associated_to_lungroup('11',
'11')
self.assertTrue(result)
def test_lun_is_not_associated_to_lun_group(self):
self.driver.restclient.login()
self.driver.restclient.associate_lun_to_lungroup('12', '12')
self.driver.restclient.remove_lun_from_lungroup('12', '12')
result = self.driver.restclient._is_lun_associated_to_lungroup('12',
'12')
self.assertFalse(result)
def test_get_lun_conf_params(self):
self.driver.restclient.login()
luninfo = huawei_utils.get_lun_conf_params(self.xml_file_path)
luninfo['pool_id'] = '0'
luninfo['volume_size'] = 2
luninfo['volume_description'] = 'test volume'
luninfo = huawei_utils.init_lun_parameters('5mFHcBv4RkCcD+JyrWc0SA',
luninfo)
self.assertEqual('5mFHcBv4RkCcD+JyrWc0SA', luninfo['NAME'])
def test_check_conf_file(self):
self.driver.restclient.login()
self.driver.restclient.checkFlag = True
huawei_utils.check_conf_file(self.xml_file_path)
self.assertTrue(self.driver.restclient.checkFlag)
def test_get_conf_host_os_type(self):
self.driver.restclient.login()
host_os = huawei_utils.get_conf_host_os_type('100.97.10.30',
self.configuration)
self.assertEqual('0', host_os)
def create_fake_conf_file(self):
"""Create a fake Config file
Huawei storage customize a XML configuration file,
the configuration file is used to set the Huawei storage custom
parameters, therefore, in the UT test we need to simulate such a
configuration file
"""
doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
storage = doc.createElement('Storage')
config.appendChild(storage)
controllerip0 = doc.createElement('ControllerIP0')
controllerip0_text = doc.createTextNode('10.10.10.1')
controllerip0.appendChild(controllerip0_text)
storage.appendChild(controllerip0)
controllerip1 = doc.createElement('ControllerIP1')
controllerip1_text = doc.createTextNode('10.10.10.2')
controllerip1.appendChild(controllerip1_text)
storage.appendChild(controllerip1)
username = doc.createElement('UserName')
username_text = doc.createTextNode('admin')
username.appendChild(username_text)
storage.appendChild(username)
userpassword = doc.createElement('UserPassword')
userpassword_text = doc.createTextNode('Admin@storage')
userpassword.appendChild(userpassword_text)
storage.appendChild(userpassword)
url = doc.createElement('RestURL')
url_text = doc.createTextNode('http://100.115.10.69:8082/'
'deviceManager/rest/')
url.appendChild(url_text)
storage.appendChild(url)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool')
storagepool.appendChild(pool_text)
storage.appendChild(storagepool)
lun = doc.createElement('LUN')
config.appendChild(lun)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool')
storagepool.appendChild(pool_text)
lun.appendChild(storagepool)
timeout = doc.createElement('Timeout')
timeout_text = doc.createTextNode('43200')
timeout.appendChild(timeout_text)
lun.appendChild(timeout)
lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval')
lun_ready_wait_interval_text = doc.createTextNode('2')
lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text)
lun.appendChild(lun_ready_wait_interval)
iscsi = doc.createElement('iSCSI')
config.appendChild(iscsi)
defaulttargetip = doc.createElement('DefaultTargetIP')
defaulttargetip_text = doc.createTextNode('100.115.10.68')
defaulttargetip.appendChild(defaulttargetip_text)
iscsi.appendChild(defaulttargetip)
initiator = doc.createElement('Initiator')
initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
initiator.setAttribute('TargetIP', '192.168.100.2')
iscsi.appendChild(initiator)
prefetch = doc.createElement('Prefetch')
prefetch.setAttribute('Type', '1')
prefetch.setAttribute('Value', '0')
lun.appendChild(prefetch)
host = doc.createElement('Host')
host.setAttribute('HostIP', '100.97.10.30')
host.setAttribute('OSType', 'Linux')
config.appendChild(host)
fakefile = open(self.fake_conf_file, 'w')
fakefile.write(doc.toprettyxml(indent=''))
fakefile.close()
|
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes.
Email: danaukes<at>seas.harvard.edu.
Please see LICENSE.txt for full license.
"""
import popupcad
import dev_tools
import types
import sys
import popupcad_manufacturing_plugins
#load external modules
import PySide
import numpy
import shapely
modules_before = sys.modules.copy()
def make_new_module(name):
new_module = types.ModuleType(name)
sys.modules[name] = new_module
exec('{0}=new_module'.format(name))
def load_local(source_location, name):
# print(name)
exec('from {0} import {1}'.format(source_location, name))
def remap_module(source_location, dest_location):
# print(source_location,dest_location)
exec('{0}={1}'.format(dest_location, source_location))
exec('sys.modules["{0}"]={1}'.format(dest_location, source_location))
def remap_class(source_location, dest_location):
exec('{0}={1}'.format(dest_location, source_location))
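# The three helpers above form a small backwards-compatibility shim: modules are
# imported under their current local names and then re-registered in sys.modules
# under the historical dotted paths listed below, presumably so that popupcad
# files pickled against older module layouts still unpickle. For example,
# remap_module('customshapely', 'popupcad.geometry.customshapely') makes
# sys.modules['popupcad.geometry.customshapely'] point at the local module.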
local_modules0 = []
local_modules0.append('multivalueoperation2')
local_modules0.append('sketchoperation2')
modules_remap0 = []
my_manufacturing_modules0 = []
my_manufacturing_modules0.append('multivalueoperation2')
my_manufacturing_modules0.append('sketchoperation2')
for module in my_manufacturing_modules0:
modules_remap0.append((module, 'popupcad.manufacturing.' + module))
modules_remap0.append((module, 'popupcad.plugins.manufacturing.' + module))
modules_remap0.append((module, 'popupcad_manufacturing_plugins.' + module))
local_modules = []
local_modules.append('autoweb3')
local_modules.append('bufferop2')
local_modules.append('cleanup')
local_modules.append('customshapely')
local_modules.append('customsupport2')
local_modules.append('customsupport3')
local_modules.append('cutop')
local_modules.append('cutop2')
local_modules.append('genericpolygon')
local_modules.append('identifybodies')
local_modules.append('identifyrigidbodies')
local_modules.append('jointop')
local_modules.append('keepout2')
local_modules.append('laminateoperation')
local_modules.append('layerop')
#local_modules.append('materials')
# local_modules.append('locateoperation')
local_modules.append('locateoperation2')
local_modules.append('outersheet2')
local_modules.append('placeop4')
local_modules.append('placeop5')
local_modules.append('placeop6')
local_modules.append('placeop7')
local_modules.append('removability')
local_modules.append('scrapoperation')
local_modules.append('shiftflip2')
local_modules.append('simplify')
local_modules.append('sketchoperation')
local_modules.append('supportcandidate3')
local_modules.append('toolclearance2')
local_modules.append('toolclearance3')
new_modules = []
new_modules.append('popupcad.plugins')
new_modules.append('popupcad.plugins.manufacturing')
new_modules.append('popupcad.constraints')
#new_modules.append('popupcad.materials')
# new_modules.append('popupcad.manufacturing.freeze')
my_manufacturing_modules = []
# my_manufacturing_modules.append('multivalueoperation2')
my_manufacturing_modules.append('autoweb3')
my_manufacturing_modules.append('bufferop2')
my_manufacturing_modules.append('cleanup')
my_manufacturing_modules.append('customsupport2')
my_manufacturing_modules.append('customsupport3')
my_manufacturing_modules.append('cutop')
my_manufacturing_modules.append('cutop2')
my_manufacturing_modules.append('identifybodies')
my_manufacturing_modules.append('identifyrigidbodies')
my_manufacturing_modules.append('jointop')
my_manufacturing_modules.append('keepout2')
my_manufacturing_modules.append('laminateoperation')
my_manufacturing_modules.append('layerop')
# my_manufacturing_modules.append('locateoperation')
my_manufacturing_modules.append('locateoperation2')
my_manufacturing_modules.append('outersheet2')
my_manufacturing_modules.append('placeop4')
my_manufacturing_modules.append('placeop5')
my_manufacturing_modules.append('placeop6')
my_manufacturing_modules.append('placeop7')
my_manufacturing_modules.append('removability')
my_manufacturing_modules.append('scrapoperation')
my_manufacturing_modules.append('shiftflip2')
my_manufacturing_modules.append('simplify')
my_manufacturing_modules.append('sketchoperation')
my_manufacturing_modules.append('supportcandidate3')
my_manufacturing_modules.append('toolclearance2')
my_manufacturing_modules.append('toolclearance3')
modules_remap = []
modules_remap.append(
('locateoperation2',
'popupcad.manufacturing.locateoperation'))
for module in my_manufacturing_modules:
modules_remap.append((module, 'popupcad.manufacturing.' + module))
modules_remap.append((module, 'popupcad.plugins.manufacturing.' + module))
modules_remap.append(
(module,
'popupcad_manufacturing_plugins.manufacturing.' +
module))
modules_remap.append(
('popupcad_manufacturing_plugins.manufacturing.cutop2',
'popupcad.manufacturing.cutop2'))
modules_remap.append(
('popupcad.filetypes.laminate',
'popupcad.materials.laminatesheet'))
modules_remap.append(
('popupcad.filetypes.genericshapes',
'popupcad.geometry.genericpolygon'))
modules_remap.append(
('popupcad.filetypes.genericshapebase',
'popupcad.geometry.genericshapebase'))
modules_remap.append(
('popupcad.filetypes.constraints',
'popupcad.constraints.constraints'))
modules_remap.append(
('popupcad.filetypes.constraints',
'dev_tools.constraints'))
modules_remap.append(
('popupcad.manufacturing.freeze',
'popupcad.manufacturing.flatten'))
modules_remap.append(
('customshapely',
'popupcad.geometry.customshapely'))
#modules_remap.append(
# ('materials',
# 'popupcad.materials.materials'))
classes_remap = []
classes_remap.append(
('popupcad.geometry.vertex.ShapeVertex',
'popupcad.geometry.vertex.Vertex'))
classes_remap.append(
('genericpolygon.GenericShape',
'popupcad.filetypes.genericshapes.GenericShape'))
classes_remap.append(
('popupcad.filetypes.layerdef.LayerDef',
'popupcad.materials.LayerDef'))
classes_remap.append(
('popupcad.filetypes.layerdef.LayerDef',
'popupcad.materials.materials.LayerDef'))
classes_remap.append(
('popupcad.filetypes.layer.Layer',
'popupcad.filetypes.laminate.Layer'))
classes_remap.append(
('locateoperation2.LocateOperation2',
'popupcad.manufacturing.locateoperation2.LocateOperation'))
classes_remap.append(
('popupcad.manufacturing.freeze.Freeze',
'popupcad.manufacturing.flatten.Flatten'))
# load_local('.','multivalueoperation2')
# remap_module('multivalueoperation2','popupcad.manufacturing.multivalueoperation2')
for module in new_modules:
make_new_module(module)
for module in local_modules0:
load_local('.', module)
for item in modules_remap0:
remap_module(*item)
for module in local_modules:
load_local('.', module)
for item in modules_remap:
remap_module(*item)
for item in classes_remap:
remap_class(*item)
modules_after = sys.modules.copy()
modules_diff = list(set(modules_after.keys()) - set(modules_before.keys()))
Vertex = popupcad.geometry.vertex.Vertex
|
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.node.test.test_deploy -*-
"""
Deploy applications on nodes.
"""
from itertools import chain
from warnings import warn
from zope.interface import Interface, implementer, Attribute
from characteristic import attributes
from pyrsistent import PRecord, field
from eliot import write_failure, Logger, start_action
from twisted.internet.defer import gatherResults, fail, succeed
from ._docker import DockerClient, PortMap, Environment, Volume as DockerVolume
from . import IStateChange, in_parallel, sequentially
from ..control._model import (
Application, DatasetChanges, AttachedVolume, DatasetHandoff,
NodeState, DockerImage, Port, Link, Manifestation, Dataset,
pset_field, ip_to_uuid
)
from ..route import make_host_network, Proxy, OpenPort
from ..volume._ipc import RemoteVolumeManager, standard_node
from ..volume._model import VolumeSize
from ..volume.service import VolumeName
from ..common import gather_deferreds
_logger = Logger()
def _to_volume_name(dataset_id):
"""
Convert dataset ID to ``VolumeName`` with ``u"default"`` namespace.
To be replaced in https://clusterhq.atlassian.net/browse/FLOC-737 with
real namespace support.
:param unicode dataset_id: Dataset ID.
:return: ``VolumeName`` with default namespace.
"""
return VolumeName(namespace=u"default", dataset_id=dataset_id)
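# For illustration only (hypothetical dataset ID):
#     _to_volume_name(u"abcd-1234")
# returns VolumeName(namespace=u"default", dataset_id=u"abcd-1234").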
class IDeployer(Interface):
"""
An object that can discover local state and calculate necessary
changes to bring local state and desired cluster configuration into
alignment.
:ivar UUID node_uuid: The UUID of the node this deployer is running.
:ivar unicode hostname: The hostname (really, IP) of the node this
deployer is managing.
"""
    node_uuid = Attribute("The UUID of this node, a ``UUID`` instance.")
hostname = Attribute("The public IP address of this node.")
def discover_state(local_state):
"""
Discover the local state, i.e. the state which is exclusively under
the purview of the convergence agent running this instance.
:param NodeState local_state: The previously known state of this
node. This may include information that this deployer cannot
discover on its own. Information here should NOT be copied
into the result; the return result should include only
information discovered by this particular deployer.
:return: A ``Deferred`` which fires with a tuple of
``IClusterStateChange`` providers describing
local state. These objects will be passed to the control
service (see ``flocker.control._protocol``) and may also be
passed to this object's ``calculate_changes()`` method.
"""
def calculate_changes(configuration, cluster_state):
"""
Calculate the state changes necessary to make the local state match the
desired cluster configuration.
:param Deployment configuration: The intended configuration of all
nodes.
:param DeploymentState cluster_state: The current state of all nodes
already updated with recent output of ``discover_state``.
:return: An ``IStateChange`` provider.
"""
def _eliot_system(part):
return u"flocker:p2pdeployer:" + part
@implementer(IStateChange)
class StartApplication(PRecord):
"""
Launch the supplied application as a container.
:ivar Application application: The ``Application`` to create and
start.
:ivar NodeState node_state: The state of the node the ``Application``
is running on.
"""
application = field(type=Application, mandatory=True)
node_state = field(type=NodeState, mandatory=True)
# This (and other eliot_action implementations) uses `start_action` because
# it was easier than defining a new `ActionType` with a bunch of fields.
# It might be worth doing that work eventually, though. Also, this can
# turn into a regular attribute when the `_logger` argument is no longer
# required by Eliot.
@property
def eliot_action(self):
return start_action(
_logger, _eliot_system(u"startapplication"),
name=self.application.name,
)
def run(self, deployer):
application = self.application
volumes = []
if application.volume is not None:
dataset_id = application.volume.manifestation.dataset_id
volumes.append(DockerVolume(
container_path=application.volume.mountpoint,
node_path=self.node_state.paths[dataset_id]))
if application.ports is not None:
port_maps = map(lambda p: PortMap(internal_port=p.internal_port,
external_port=p.external_port),
application.ports)
else:
port_maps = []
environment = {}
for link in application.links:
environment.update(_link_environment(
protocol=u"tcp",
alias=link.alias,
local_port=link.local_port,
hostname=self.node_state.hostname,
remote_port=link.remote_port,
))
if application.environment is not None:
environment.update(application.environment)
if environment:
docker_environment = Environment(
variables=frozenset(environment.iteritems()))
else:
docker_environment = None
return deployer.docker_client.add(
application.name,
application.image.full_name,
ports=port_maps,
environment=docker_environment,
volumes=volumes,
mem_limit=application.memory_limit,
cpu_shares=application.cpu_shares,
restart_policy=application.restart_policy,
command_line=application.command_line,
)
def _link_environment(protocol, alias, local_port, hostname, remote_port):
"""
Generate the environment variables used for defining a docker link.
    Docker containers expect an environment variable
    ``<alias>_PORT_<local_port>_TCP`` which contains the URL of the remote end
of a link, as well as parsed variants ``_ADDR``, ``_PORT``, ``_PROTO``.
:param unicode protocol: The protocol used for the link.
:param unicode alias: The name of the link.
:param int local_port: The port the local application expects to access.
:param unicode hostname: The remote hostname to connect to.
:param int remote_port: The remote port to connect to.
"""
alias = alias.upper()
base = u'%s_PORT_%d_%s' % (alias, local_port, protocol.upper())
return {
base: u'%s://%s:%d' % (protocol, hostname, remote_port),
base + u'_ADDR': hostname,
base + u'_PORT': u'%d' % (remote_port,),
base + u'_PROTO': protocol,
}
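# A minimal sketch of what the helper above returns, assuming the hypothetical
# arguments alias=u"postgres", local_port=5432, hostname=u"node1.example.com"
# and remote_port=54320:
#     POSTGRES_PORT_5432_TCP=tcp://node1.example.com:54320
#     POSTGRES_PORT_5432_TCP_ADDR=node1.example.com
#     POSTGRES_PORT_5432_TCP_PORT=54320
#     POSTGRES_PORT_5432_TCP_PROTO=tcp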
@implementer(IStateChange)
class StopApplication(PRecord):
"""
Stop and disable the given application.
:ivar Application application: The ``Application`` to stop.
"""
application = field(type=Application, mandatory=True)
@property
def eliot_action(self):
return start_action(
_logger, _eliot_system(u"stopapplication"),
name=self.application.name,
)
def run(self, deployer):
application = self.application
unit_name = application.name
return deployer.docker_client.remove(unit_name)
@implementer(IStateChange)
class CreateDataset(PRecord):
"""
Create a new locally-owned dataset.
:ivar Dataset dataset: Dataset to create.
"""
dataset = field(type=Dataset, mandatory=True)
@property
def eliot_action(self):
return start_action(
_logger, _eliot_system(u"createdataset"),
dataset_id=self.dataset.dataset_id,
maximum_size=self.dataset.maximum_size,
)
def run(self, deployer):
volume = deployer.volume_service.get(
name=_to_volume_name(self.dataset.dataset_id),
size=VolumeSize(maximum_size=self.dataset.maximum_size)
)
return deployer.volume_service.create(volume)
@implementer(IStateChange)
@attributes(["dataset"])
class ResizeDataset(object):
"""
Resize an existing locally-owned dataset.
:ivar Dataset dataset: Dataset to resize.
"""
@property
def eliot_action(self):
return start_action(
_logger, _eliot_system(u"createdataset"),
dataset_id=self.dataset.dataset_id,
maximum_size=self.dataset.maximum_size,
)
def run(self, deployer):
volume = deployer.volume_service.get(
name=_to_volume_name(self.dataset.dataset_id),
size=VolumeSize(maximum_size=self.dataset.maximum_size)
)
return deployer.volume_service.set_maximum_size(volume)
@implementer(IStateChange)
@attributes(["dataset", "hostname"])
class HandoffDataset(object):
"""
A dataset handoff that needs to be performed from this node to another
node.
See :cls:`flocker.volume.VolumeService.handoff` for more details.
:ivar Dataset dataset: The dataset to hand off.
:ivar bytes hostname: The hostname of the node to which the dataset is
meant to be handed off.
"""
@property
def eliot_action(self):
return start_action(
_logger, _eliot_system(u"handoff"),
dataset_id=self.dataset.dataset_id,
hostname=self.hostname,
)
def run(self, deployer):
service = deployer.volume_service
destination = standard_node(self.hostname)
return service.handoff(
service.get(_to_volume_name(self.dataset.dataset_id)),
RemoteVolumeManager(destination))
@implementer(IStateChange)
@attributes(["dataset", "hostname"])
class PushDataset(object):
"""
A dataset push that needs to be performed from this node to another
node.
See :cls:`flocker.volume.VolumeService.push` for more details.
    :ivar Dataset dataset: The dataset to push.
:ivar bytes hostname: The hostname of the node to which the dataset is
meant to be pushed.
"""
@property
def eliot_action(self):
return start_action(
_logger, _eliot_system(u"push"),
dataset_id=self.dataset.dataset_id,
hostname=self.hostname,
)
def run(self, deployer):
service = deployer.volume_service
destination = standard_node(self.hostname)
return service.push(
service.get(_to_volume_name(self.dataset.dataset_id)),
RemoteVolumeManager(destination))
@implementer(IStateChange)
class DeleteDataset(PRecord):
"""
Delete all local copies of the dataset.
A better action would be one that deletes a specific manifestation
("volume" in flocker.volume legacy terminology). Unfortunately
currently "remotely owned volumes" (legacy terminology), aka
non-primary manifestations or replicas, are not exposed to the
deployer, so we have to enumerate them here.
:ivar Dataset dataset: The dataset to delete.
"""
dataset = field(mandatory=True, type=Dataset)
@property
def eliot_action(self):
return start_action(
_logger, _eliot_system("delete"),
dataset_id=self.dataset.dataset_id,
)
def run(self, deployer):
service = deployer.volume_service
d = service.enumerate()
def got_volumes(volumes):
deletions = []
for volume in volumes:
if volume.name.dataset_id == self.dataset.dataset_id:
deletions.append(service.pool.destroy(volume).addErrback(
write_failure, _logger, u"flocker:p2pdeployer:delete"))
return gatherResults(deletions)
d.addCallback(got_volumes)
return d
@implementer(IStateChange)
class SetProxies(PRecord):
"""
Set the ports which will be forwarded to other nodes.
:ivar ports: A collection of ``Proxy`` objects.
"""
ports = pset_field(Proxy)
@property
def eliot_action(self):
return start_action(
_logger, _eliot_system("setproxies"),
addresses=list(dict(port) for port in self.ports),
)
def run(self, deployer):
results = []
# XXX: The proxy manipulation operations are blocking. Convert to a
# non-blocking API. See https://clusterhq.atlassian.net/browse/FLOC-320
for proxy in deployer.network.enumerate_proxies():
try:
deployer.network.delete_proxy(proxy)
except:
results.append(fail())
for proxy in self.ports:
try:
deployer.network.create_proxy_to(proxy.ip, proxy.port)
except:
results.append(fail())
return gather_deferreds(results)
@implementer(IStateChange)
class OpenPorts(PRecord):
"""
Set the ports which will have the firewall opened.
:ivar ports: A list of :class:`OpenPort`s.
"""
ports = pset_field(OpenPort)
@property
def eliot_action(self):
return start_action(
_logger, _eliot_system("openports"),
ports=list(port.port for port in self.ports),
)
def run(self, deployer):
results = []
# XXX: The proxy manipulation operations are blocking. Convert to a
# non-blocking API. See https://clusterhq.atlassian.net/browse/FLOC-320
for open_port in deployer.network.enumerate_open_ports():
try:
deployer.network.delete_open_port(open_port)
except:
results.append(fail())
for open_port in self.ports:
try:
deployer.network.open_port(open_port.port)
except:
results.append(fail())
return gather_deferreds(results)
class NotInUseDatasets(object):
"""
Filter out datasets that are in use by applications.
For now we delay things like deletion until we know applications
aren't using the dataset. Later on we'll use leases to decouple
the application and dataset logic better; see
https://clusterhq.atlassian.net/browse/FLOC-1425.
"""
def __init__(self, node_state):
"""
:param NodeState node_state: Known local state.
"""
self._in_use_datasets = {app.volume.manifestation.dataset_id
for app in node_state.applications
if app.volume is not None}
def __call__(self, objects,
get_dataset_id=lambda d: unicode(d.dataset_id)):
"""
Filter out all objects whose dataset_id is in use.
:param objects: Objects to filter.
:param get_dataset_id: Callable to extract a ``dataset_id`` from
an object. By default looks up ``dataset_id`` attribute.
:return list: Filtered objects.
"""
result = []
for obj in objects:
if get_dataset_id(obj) not in self._in_use_datasets:
result.append(obj)
return result
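# Usage sketch (mirrors calculate_changes() below; the variable names are
# illustrative):
#     not_in_use = NotInUseDatasets(local_state)
#     deletable = not_in_use(dataset_changes.deleting)
#     going = not_in_use(dataset_changes.going, lambda d: d.dataset.dataset_id)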
@implementer(IDeployer)
class P2PManifestationDeployer(object):
"""
Discover and calculate changes for peer-to-peer manifestations (e.g. ZFS)
on a node.
:ivar unicode hostname: The hostname of the node that this is running on.
:ivar VolumeService volume_service: The volume manager for this node.
"""
def __init__(self, hostname, volume_service, node_uuid=None):
if node_uuid is None:
# To be removed in https://clusterhq.atlassian.net/browse/FLOC-1795
warn("UUID is required, this is for backwards compat with existing"
" tests only. If you see this in production code that's "
"a bug.", DeprecationWarning, stacklevel=2)
node_uuid = ip_to_uuid(hostname)
self.node_uuid = node_uuid
self.hostname = hostname
self.volume_service = volume_service
def discover_state(self, local_state):
"""
Discover local ZFS manifestations.
"""
# Add real namespace support in
# https://clusterhq.atlassian.net/browse/FLOC-737; for now we just
# strip the namespace since there will only ever be one.
volumes = self.volume_service.enumerate()
def map_volumes_to_size(volumes):
primary_manifestations = {}
for volume in volumes:
if volume.node_id == self.volume_service.node_id:
# FLOC-1240 non-primaries should be added in too
path = volume.get_filesystem().get_path()
primary_manifestations[path] = (
volume.name.dataset_id, volume.size.maximum_size)
return primary_manifestations
volumes.addCallback(map_volumes_to_size)
def got_volumes(available_manifestations):
manifestation_paths = {dataset_id: path for (path, (dataset_id, _))
in available_manifestations.items()}
manifestations = list(
Manifestation(dataset=Dataset(dataset_id=dataset_id,
maximum_size=maximum_size),
primary=True)
for (dataset_id, maximum_size) in
available_manifestations.values())
return [NodeState(
uuid=self.node_uuid,
hostname=self.hostname,
applications=None,
used_ports=None,
manifestations={manifestation.dataset_id: manifestation
for manifestation in manifestations},
paths=manifestation_paths,
devices={},
)]
volumes.addCallback(got_volumes)
return volumes
def calculate_changes(self, configuration, cluster_state):
"""
Calculate necessary changes to peer-to-peer manifestations.
Datasets that are in use by applications cannot be deleted,
handed-off or resized. See
https://clusterhq.atlassian.net/browse/FLOC-1425 for leases, a
better solution.
"""
local_state = cluster_state.get_node(self.node_uuid)
# We need to know applications (for now) to see if we should delay
# deletion or handoffs. Eventually this will rely on leases instead.
if local_state.applications is None:
return sequentially(changes=[])
phases = []
not_in_use_datasets = NotInUseDatasets(local_state)
# Find any dataset that are moving to or from this node - or
# that are being newly created by this new configuration.
dataset_changes = find_dataset_changes(
self.node_uuid, cluster_state, configuration)
resizing = not_in_use_datasets(dataset_changes.resizing)
if resizing:
phases.append(in_parallel(changes=[
ResizeDataset(dataset=dataset)
for dataset in resizing]))
going = not_in_use_datasets(dataset_changes.going,
lambda d: d.dataset.dataset_id)
if going:
phases.append(in_parallel(changes=[
HandoffDataset(dataset=handoff.dataset,
hostname=handoff.hostname)
for handoff in going]))
if dataset_changes.creating:
phases.append(in_parallel(changes=[
CreateDataset(dataset=dataset)
for dataset in dataset_changes.creating]))
deleting = not_in_use_datasets(dataset_changes.deleting)
if deleting:
phases.append(in_parallel(changes=[
DeleteDataset(dataset=dataset)
for dataset in deleting
]))
return sequentially(changes=phases)
@implementer(IDeployer)
class ApplicationNodeDeployer(object):
"""
Discover and calculate changes for applications running on a node.
:ivar unicode hostname: The hostname of the node that this is running
on.
:ivar IDockerClient docker_client: The Docker client API to use in
deployment operations. Default ``DockerClient``.
:ivar INetwork network: The network routing API to use in
deployment operations. Default is iptables-based implementation.
"""
def __init__(self, hostname, docker_client=None, network=None,
node_uuid=None):
if node_uuid is None:
# To be removed in https://clusterhq.atlassian.net/browse/FLOC-1795
warn("UUID is required, this is for backwards compat with existing"
" tests only. If you see this in production code that's "
"a bug.", DeprecationWarning, stacklevel=2)
node_uuid = ip_to_uuid(hostname)
self.node_uuid = node_uuid
self.hostname = hostname
if docker_client is None:
docker_client = DockerClient()
self.docker_client = docker_client
if network is None:
network = make_host_network()
self.network = network
def discover_state(self, local_state):
"""
List all the ``Application``\ s running on this node.
The given local state is used to figure out if applications have
attached volumes that are specific manifestations. If no
manifestations are known then discovery isn't done and ignorance
is claimed about applications. This ensures that the information
returned is accurate, and therefore that convergence is done
correctly.
This does mean you can't run an application agent without a
dataset agent. See
https://clusterhq.atlassian.net/browse/FLOC-1646.
:return: A ``Deferred`` which fires with a list containing a
``NodeState`` instance with information only about
``Application`` and ports. ``NodeState.manifestations`` and
``NodeState.paths`` will not be filled in.
"""
if local_state.manifestations is None:
# Without manifestations we don't know if local applications'
# volumes are manifestations or not. Rather than return
# incorrect information leading to possibly erroneous
# convergence actions, just declare ignorance. Eventually the
# convergence agent for datasets will discover the information
# and then we can proceed.
return succeed([NodeState(
uuid=self.node_uuid,
hostname=self.hostname,
applications=None,
used_ports=None,
manifestations=None,
paths=None,
)])
path_to_manifestations = {path: local_state.manifestations[dataset_id]
for (dataset_id, path)
in local_state.paths.items()}
d = self.docker_client.list()
def applications_from_units(units):
applications = []
for unit in units:
image = DockerImage.from_string(unit.container_image)
if unit.volumes:
# XXX https://clusterhq.atlassian.net/browse/FLOC-49
# we only support one volume per container
# at this time
# XXX https://clusterhq.atlassian.net/browse/FLOC-773
# we assume all volumes are datasets
docker_volume = list(unit.volumes)[0]
try:
manifestation = path_to_manifestations[
docker_volume.node_path]
except KeyError:
# Apparently not a dataset we're managing, give up.
volume = None
else:
volume = AttachedVolume(
manifestation=manifestation,
mountpoint=docker_volume.container_path)
else:
volume = None
ports = []
for portmap in unit.ports:
ports.append(Port(
internal_port=portmap.internal_port,
external_port=portmap.external_port
))
links = []
environment = []
if unit.environment:
environment_dict = unit.environment.to_dict()
for label, value in environment_dict.items():
# <ALIAS>_PORT_<PORTNUM>_TCP_PORT=<value>
parts = label.rsplit(b"_", 4)
try:
alias, pad_a, port, pad_b, pad_c = parts
local_port = int(port)
except ValueError:
# <ALIAS>_PORT_<PORT>_TCP
parts = label.rsplit(b"_", 3)
try:
alias, pad_a, port, pad_b = parts
except ValueError:
environment.append((label, value))
continue
if not (pad_a, pad_b) == (b"PORT", b"TCP"):
environment.append((label, value))
continue
if (pad_a, pad_b, pad_c) == (b"PORT", b"TCP", b"PORT"):
links.append(Link(
local_port=local_port,
remote_port=int(value),
alias=alias,
))
applications.append(Application(
name=unit.name,
image=image,
ports=frozenset(ports),
volume=volume,
environment=environment if environment else None,
links=frozenset(links),
restart_policy=unit.restart_policy,
running=(unit.activation_state == u"active"),
command_line=unit.command_line,
))
return [NodeState(
uuid=self.node_uuid,
hostname=self.hostname,
applications=applications,
used_ports=self.network.enumerate_used_ports(),
manifestations=None,
paths=None,
)]
d.addCallback(applications_from_units)
return d
def calculate_changes(self, desired_configuration, current_cluster_state):
"""
Work out which changes need to happen to the local state to match
the given desired state.
Currently this involves the following phases:
1. Change proxies to point to new addresses (should really be
last, see https://clusterhq.atlassian.net/browse/FLOC-380)
2. Stop all relevant containers.
3. Start and restart any containers that should be running
locally, so long as their required datasets are available.
"""
# We are a node-specific IDeployer:
current_node_state = current_cluster_state.get_node(
self.node_uuid, hostname=self.hostname)
if current_node_state.applications is None:
# We don't know current application state, so can't calculate
# anything. This will be the case if we don't know the local
# datasets' state yet; see notes in discover_state().
return sequentially(changes=[])
phases = []
desired_proxies = set()
desired_open_ports = set()
desired_node_applications = []
node_states = {node.uuid: node for node in current_cluster_state.nodes}
for node in desired_configuration.nodes:
if node.uuid == self.node_uuid:
desired_node_applications = node.applications
for application in node.applications:
for port in application.ports:
desired_open_ports.add(
OpenPort(port=port.external_port))
else:
for application in node.applications:
for port in application.ports:
# XXX: also need to do DNS resolution. See
# https://clusterhq.atlassian.net/browse/FLOC-322
if node.uuid in node_states:
desired_proxies.add(Proxy(
ip=node_states[node.uuid].hostname,
port=port.external_port))
if desired_proxies != set(self.network.enumerate_proxies()):
phases.append(SetProxies(ports=desired_proxies))
if desired_open_ports != set(self.network.enumerate_open_ports()):
phases.append(OpenPorts(ports=desired_open_ports))
all_applications = current_node_state.applications
# Compare the applications being changed by name only. Other
# configuration changes aren't important at this point.
local_application_names = {app.name for app in all_applications}
desired_local_state = {app.name for app in
desired_node_applications}
# Don't start applications that exist on this node but aren't
# running; Docker is in charge of restarts:
start_names = desired_local_state.difference(local_application_names)
stop_names = {app.name for app in all_applications}.difference(
desired_local_state)
start_containers = [
StartApplication(application=app, node_state=current_node_state)
for app in desired_node_applications
if ((app.name in start_names) and
# If manifestation isn't available yet, don't start:
# XXX in FLOC-1240 non-primaries should be checked.
(app.volume is None or
app.volume.manifestation.dataset_id in
current_node_state.manifestations))
]
stop_containers = [
StopApplication(application=app) for app in all_applications
if app.name in stop_names
]
restart_containers = []
applications_to_inspect = (
{app.name for app in all_applications} & desired_local_state)
current_applications_dict = dict(zip(
[a.name for a in all_applications], all_applications
))
desired_applications_dict = dict(zip(
[a.name for a in desired_node_applications],
desired_node_applications
))
for application_name in applications_to_inspect:
inspect_desired = desired_applications_dict[application_name]
inspect_current = current_applications_dict[application_name]
# For our purposes what we care about is if configuration has
# changed, so if it's not running but it's otherwise the same
# we don't want to do anything:
comparable_current = inspect_current.transform(["running"], True)
# Current state never has metadata on datasets, so remove from
# configuration:
comparable_desired = inspect_desired
if comparable_desired.volume is not None:
comparable_desired = comparable_desired.transform(
["volume", "manifestation", "dataset", "metadata"], {})
if comparable_desired != comparable_current:
restart_containers.append(sequentially(changes=[
StopApplication(application=inspect_current),
StartApplication(application=inspect_desired,
node_state=current_node_state),
]))
if stop_containers:
phases.append(in_parallel(changes=stop_containers))
start_restart = start_containers + restart_containers
if start_restart:
phases.append(in_parallel(changes=start_restart))
return sequentially(changes=phases)
def find_dataset_changes(uuid, current_state, desired_state):
"""
Find what actions need to be taken to deal with changes in dataset
manifestations between current state and desired state of the cluster.
XXX The logic here assumes the mountpoints have not changed,
and will act unexpectedly if that is the case. See
https://clusterhq.atlassian.net/browse/FLOC-351 for more details.
XXX The logic here assumes volumes are never added or removed to
existing applications, merely moved across nodes. As a result test
coverage for those situations is not implemented. See
https://clusterhq.atlassian.net/browse/FLOC-352 for more details.
:param UUID uuid: The uuid of the node for which to find changes.
:param Deployment current_state: The old state of the cluster on which the
changes are based.
:param Deployment desired_state: The new state of the cluster towards which
the changes are working.
:return DatasetChanges: Changes to datasets that will be needed in
order to match desired configuration.
"""
uuid_to_hostnames = {node.uuid: node.hostname
for node in current_state.nodes}
desired_datasets = {node.uuid:
set(manifestation.dataset for manifestation
in node.manifestations.values())
for node in desired_state.nodes}
current_datasets = {node.uuid:
set(manifestation.dataset for manifestation
# We pretend ignorance is equivalent to no
# datasets; this is wrong. See FLOC-2060.
in (node.manifestations or {}).values())
for node in current_state.nodes}
local_desired_datasets = desired_datasets.get(uuid, set())
local_desired_dataset_ids = set(dataset.dataset_id for dataset in
local_desired_datasets)
local_current_dataset_ids = set(dataset.dataset_id for dataset in
current_datasets.get(uuid, set()))
remote_current_dataset_ids = set()
for dataset_node_uuid, current in current_datasets.items():
if dataset_node_uuid != uuid:
remote_current_dataset_ids |= set(
dataset.dataset_id for dataset in current)
# If a dataset exists locally and is desired anywhere on the cluster, and
# the desired dataset is a different maximum_size to the existing dataset,
# the existing local dataset should be resized before any other action
# is taken on it.
resizing = set()
for desired in desired_datasets.values():
for new_dataset in desired:
if new_dataset.dataset_id in local_current_dataset_ids:
for cur_dataset in current_datasets[uuid]:
if cur_dataset.dataset_id != new_dataset.dataset_id:
continue
if cur_dataset.maximum_size != new_dataset.maximum_size:
resizing.add(new_dataset)
# Look at each dataset that is going to be running elsewhere and is
# currently running here, and add a DatasetHandoff for it to `going`.
going = set()
for dataset_node_uuid, desired in desired_datasets.items():
if dataset_node_uuid != uuid:
try:
hostname = uuid_to_hostnames[dataset_node_uuid]
except KeyError:
# Apparently we don't know NodeState for this
# node. Hopefully we'll learn this information eventually
# but until we do we can't proceed.
continue
for dataset in desired:
if dataset.dataset_id in local_current_dataset_ids:
going.add(DatasetHandoff(
dataset=dataset, hostname=hostname))
# For each dataset that is going to be hosted on this node and did not
# exist previously, make sure that dataset is in `creating`.
# Unfortunately the logic for "did not exist previously" is wrong; our
# knowledge of other nodes' state may be lacking if they are
# offline. See FLOC-2060.
creating_dataset_ids = local_desired_dataset_ids.difference(
local_current_dataset_ids | remote_current_dataset_ids)
creating = set(dataset for dataset in local_desired_datasets
if dataset.dataset_id in creating_dataset_ids)
deleting = set(dataset for dataset in chain(*desired_datasets.values())
if dataset.deleted)
return DatasetChanges(going=going, deleting=deleting,
creating=creating, resizing=resizing)
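# Rough usage sketch (names hypothetical): a deployer for node `uuid` calls
#     changes = find_dataset_changes(uuid, cluster_state, configuration)
# and then acts on changes.resizing, changes.going (DatasetHandoff instances),
# changes.creating and changes.deleting, as
# P2PManifestationDeployer.calculate_changes() does above.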
|
|
'''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''
# The Olson database is updated several times a year.
OLSON_VERSION = '2014j'
VERSION = '2014.10' # Switching to pip compatible version numbering.
__version__ = VERSION
OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling
__all__ = [
'timezone', 'utc', 'country_timezones', 'country_names',
'AmbiguousTimeError', 'InvalidTimeError',
'NonExistentTimeError', 'UnknownTimeZoneError',
'all_timezones', 'all_timezones_set',
'common_timezones', 'common_timezones_set',
]
import sys, datetime, os.path, gettext
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
from pytz.exceptions import AmbiguousTimeError
from pytz.exceptions import InvalidTimeError
from pytz.exceptions import NonExistentTimeError
from pytz.exceptions import UnknownTimeZoneError
from pytz.lazy import LazyDict, LazyList, LazySet
from pytz.tzinfo import unpickler
from pytz.tzfile import build_tzinfo, _byte_string
try:
unicode
except NameError: # Python 3.x
# Python 3.x doesn't have unicode(), making writing code
# for Python 2.3 and Python 3.x a pain.
unicode = str
def ascii(s):
r"""
>>> ascii('Hello')
'Hello'
>>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnicodeEncodeError: ...
"""
s.encode('US-ASCII') # Raise an exception if not ASCII
return s # But return the original string - not a byte string.
else: # Python 2.x
def ascii(s):
r"""
>>> ascii('Hello')
'Hello'
>>> ascii(u'Hello')
'Hello'
>>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnicodeEncodeError: ...
"""
return s.encode('US-ASCII')
def open_resource(name):
"""Open a resource from the zoneinfo subdir for reading.
Uses the pkg_resources module if available and no standard file
found at the calculated location.
"""
name_parts = name.lstrip('/').split('/')
for part in name_parts:
if part == os.path.pardir or os.path.sep in part:
raise ValueError('Bad path segment: %r' % part)
filename = os.path.join(os.path.dirname(__file__),
'zoneinfo', *name_parts)
if not os.path.exists(filename) and resource_stream is not None:
# http://bugs.launchpad.net/bugs/383171 - we avoid using this
# unless absolutely necessary to help when a broken version of
# pkg_resources is installed.
return resource_stream(__name__, 'zoneinfo/' + name)
return open(filename, 'rb')
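# Illustrative only: the same helper serves both data files and compiled zone
# files, e.g. open_resource('zone.tab') or open_resource('America/New_York');
# each returns a binary file object the caller is expected to close.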
def resource_exists(name):
"""Return true if the given resource exists"""
try:
open_resource(name).close()
return True
except IOError:
return False
# Enable this when we get some translations?
# We want an i18n API that is useful to programs using Python's gettext
# module, as well as the Zope3 i18n package. Perhaps we should just provide
# the POT file and translations, and leave it up to callers to make use
# of them.
#
# t = gettext.translation(
# 'pytz', os.path.join(os.path.dirname(__file__), 'locales'),
# fallback=True
# )
# def _(timezone_name):
# """Translate a timezone name using the current locale, returning Unicode"""
# return t.ugettext(timezone_name)
_tzinfo_cache = {}
def timezone(zone):
r''' Return a datetime.tzinfo implementation for the given timezone
>>> from datetime import datetime, timedelta
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> timezone(unicode('US/Eastern')) is eastern
True
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
>>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
>>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
>>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:10:00 EST (-0500)'
Raises UnknownTimeZoneError if passed an unknown zone.
>>> try:
... timezone('Asia/Shangri-La')
... except UnknownTimeZoneError:
... print('Unknown')
Unknown
>>> try:
... timezone(unicode('\N{TRADE MARK SIGN}'))
... except UnknownTimeZoneError:
... print('Unknown')
Unknown
'''
if zone.upper() == 'UTC':
return utc
try:
zone = ascii(zone)
except UnicodeEncodeError:
# All valid timezones are ASCII
raise UnknownTimeZoneError(zone)
zone = _unmunge_zone(zone)
if zone not in _tzinfo_cache:
if zone in all_timezones_set:
fp = open_resource(zone)
try:
_tzinfo_cache[zone] = build_tzinfo(zone, fp)
finally:
fp.close()
else:
raise UnknownTimeZoneError(zone)
return _tzinfo_cache[zone]
def _unmunge_zone(zone):
"""Undo the time zone name munging done by older versions of pytz."""
return zone.replace('_plus_', '+').replace('_minus_', '-')
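# For example (hypothetical input): a name munged by an old pytz release such
# as 'Etc/GMT_plus_5' is restored to 'Etc/GMT+5'.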
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(datetime.tzinfo):
"""UTC
Optimized UTC implementation. It unpickles using the single module global
instance defined beneath this class declaration.
"""
zone = "UTC"
_utcoffset = ZERO
_dst = ZERO
_tzname = zone
def fromutc(self, dt):
if dt.tzinfo is None:
return self.localize(dt)
return super(utc.__class__, self).fromutc(dt)
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
def __reduce__(self):
return _UTC, ()
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return "<UTC>"
def __str__(self):
return "UTC"
UTC = utc = UTC() # UTC is a singleton
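# Usage sketch for the singleton (values are illustrative):
#   aware = utc.localize(datetime.datetime(2002, 10, 27, 6, 0))
#   utc.normalize(aware) is aware   # already UTC, so it is returned unchanged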
def _UTC():
"""Factory function for utc unpickling.
Makes sure that unpickling a utc instance always returns the same
module global.
    These examples belong in the UTC class above, but its docstring is
    obscured; they could also go in README.txt, but we are not depending on
    Python 2.4, so integrating the README.txt examples with the unit tests is
    not trivial.
>>> import datetime, pickle
>>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
>>> naive = dt.replace(tzinfo=None)
>>> p = pickle.dumps(dt, 1)
>>> naive_p = pickle.dumps(naive, 1)
>>> len(p) - len(naive_p)
17
>>> new = pickle.loads(p)
>>> new == dt
True
>>> new is dt
False
>>> new.tzinfo is dt.tzinfo
True
>>> utc is UTC is timezone('UTC')
True
>>> utc is timezone('GMT')
False
"""
return utc
_UTC.__safe_for_unpickling__ = True
def _p(*args):
"""Factory function for unpickling pytz tzinfo instances.
Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
by shortening the path.
"""
return unpickler(*args)
_p.__safe_for_unpickling__ = True
class _CountryTimezoneDict(LazyDict):
"""Map ISO 3166 country code to a list of timezone names commonly used
in that country.
iso3166_code is the two letter code used to identify the country.
>>> def print_list(list_of_strings):
... 'We use a helper so doctests work under Python 2.3 -> 3.x'
... for s in list_of_strings:
... print(s)
>>> print_list(country_timezones['nz'])
Pacific/Auckland
Pacific/Chatham
>>> print_list(country_timezones['ch'])
Europe/Zurich
>>> print_list(country_timezones['CH'])
Europe/Zurich
>>> print_list(country_timezones[unicode('ch')])
Europe/Zurich
>>> print_list(country_timezones['XXX'])
Traceback (most recent call last):
...
KeyError: 'XXX'
Previously, this information was exposed as a function rather than a
dictionary. This is still supported::
>>> print_list(country_timezones('nz'))
Pacific/Auckland
Pacific/Chatham
"""
def __call__(self, iso3166_code):
"""Backwards compatibility."""
return self[iso3166_code]
def _fill(self):
data = {}
zone_tab = open_resource('zone.tab')
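        # Each data row of zone.tab holds a country code, coordinates, a zone
        # name and an optional comment (whitespace separated), e.g.:
        #   NZ  -3652+17446  Pacific/Auckland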
try:
for line in zone_tab:
line = line.decode('US-ASCII')
if line.startswith('#'):
continue
code, coordinates, zone = line.split(None, 4)[:3]
if zone not in all_timezones_set:
continue
try:
data[code].append(zone)
except KeyError:
data[code] = [zone]
self.data = data
finally:
zone_tab.close()
country_timezones = _CountryTimezoneDict()
class _CountryNameDict(LazyDict):
    '''Dictionary providing ISO3166 code -> English name.
>>> print(country_names['au'])
Australia
'''
def _fill(self):
data = {}
zone_tab = open_resource('iso3166.tab')
try:
for line in zone_tab.readlines():
line = line.decode('US-ASCII')
if line.startswith('#'):
continue
code, name = line.split(None, 1)
data[code] = name.strip()
self.data = data
finally:
zone_tab.close()
country_names = _CountryNameDict()
# Time-zone info based solely on fixed offsets
class _FixedOffset(datetime.tzinfo):
zone = None # to match the standard pytz API
def __init__(self, minutes):
if abs(minutes) >= 1440:
raise ValueError("absolute offset is too large", minutes)
self._minutes = minutes
self._offset = datetime.timedelta(minutes=minutes)
def utcoffset(self, dt):
return self._offset
def __reduce__(self):
return FixedOffset, (self._minutes, )
def dst(self, dt):
return ZERO
def tzname(self, dt):
return None
def __repr__(self):
return 'pytz.FixedOffset(%d)' % self._minutes
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.replace(tzinfo=self)
def FixedOffset(offset, _tzinfos = {}):
"""return a fixed-offset timezone based off a number of minutes.
>>> one = FixedOffset(-330)
>>> one
pytz.FixedOffset(-330)
>>> one.utcoffset(datetime.datetime.now())
datetime.timedelta(-1, 66600)
>>> one.dst(datetime.datetime.now())
datetime.timedelta(0)
>>> two = FixedOffset(1380)
>>> two
pytz.FixedOffset(1380)
>>> two.utcoffset(datetime.datetime.now())
datetime.timedelta(0, 82800)
>>> two.dst(datetime.datetime.now())
datetime.timedelta(0)
    The datetime.timedelta must be strictly between -1 day and +1 day
    (exclusive at both ends).
>>> FixedOffset(1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', 1440)
>>> FixedOffset(-1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', -1440)
An offset of 0 is special-cased to return UTC.
>>> FixedOffset(0) is UTC
True
There should always be only one instance of a FixedOffset per timedelta.
This should be true for multiple creation calls.
>>> FixedOffset(-330) is one
True
>>> FixedOffset(1380) is two
True
It should also be true for pickling.
>>> import pickle
>>> pickle.loads(pickle.dumps(one)) is one
True
>>> pickle.loads(pickle.dumps(two)) is two
True
"""
if offset == 0:
return UTC
info = _tzinfos.get(offset)
if info is None:
        # We haven't seen this one before; we need to save it.
        # Use setdefault to avoid a race condition and to make sure we have
        # only one instance.
info = _tzinfos.setdefault(offset, _FixedOffset(offset))
return info
FixedOffset.__safe_for_unpickling__ = True
def _test():
import doctest, os, sys
sys.path.insert(0, os.pardir)
import pytz
return doctest.testmod(pytz)
if __name__ == '__main__':
_test()
all_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/East-Saskatchewan',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu']
all_timezones = LazyList(
tz for tz in all_timezones if resource_exists(tz))
all_timezones_set = LazySet(all_timezones)
common_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Colombo',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kathmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ulaanbaatar',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faroe',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/Perth',
'Australia/Sydney',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Wake',
'Pacific/Wallis',
'US/Alaska',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/Hawaii',
'US/Mountain',
'US/Pacific',
'UTC']
common_timezones = LazyList(
tz for tz in common_timezones if tz in all_timezones)
common_timezones_set = LazySet(common_timezones)
|
|
# -*- coding: utf-8 -*-
import unittest
import six
from mongoengine import connect, Document, StringField
from mongoengine import signals
from mongoengine.connection import register_db, disconnect
signal_output = []
class BaseSignalTests(unittest.TestCase):
maxDiff = None
def get_signal_output(self, fn, *args, **kwargs):
# Flush any existing signal output
global signal_output
signal_output = []
fn(*args, **kwargs)
return signal_output
class ConnectSignalTests(BaseSignalTests):
"""
Testing signals before/after connecting.
"""
def setUp(self):
# Save up the number of connected signals so that we can check at
# the end that all the signals we register get properly unregistered
self.pre_signals = (
len(signals.pre_connect.receivers),
len(signals.post_connect.receivers),
)
self.pre_connect = lambda sender, settings: signal_output.append(
'pre_connect: sender={sender} settings={settings!r}'.format(
sender=sender,
settings=sorted(settings.keys()),
)
)
self.post_connect = lambda sender, settings, connection: signal_output.append(
'post_connect: sender={sender} settings={settings!r} connection={connection!r}'.format(
sender=sender,
settings=sorted(settings.keys()),
connection=type(connection),
)
)
signals.pre_connect.connect(self.pre_connect)
signals.post_connect.connect(self.post_connect)
# make sure we're not connected already
disconnect()
disconnect('nondefault')
def tearDown(self):
signals.pre_connect.disconnect(self.pre_connect)
signals.post_connect.disconnect(self.post_connect)
# Check that all our signals got disconnected properly.
post_signals = (
len(signals.pre_connect.receivers),
len(signals.post_connect.receivers),
)
self.assertEqual(self.pre_signals, post_signals)
def test_new_connection(self):
""" Call to connect() should fire the pre/post signals. """
self.assertEqual(self.get_signal_output(connect), [
"pre_connect: sender=default settings=['host', 'port', 'read_preference']",
"post_connect: sender=default settings=['host', 'port', 'read_preference'] connection=<class 'pymongo.mongo_client.MongoClient'>",
])
self.assertEqual(self.get_signal_output(connect, 'nondefault'), [
"pre_connect: sender=nondefault settings=['host', 'port', 'read_preference']",
"post_connect: sender=nondefault settings=['host', 'port', 'read_preference'] connection=<class 'pymongo.mongo_client.MongoClient'>",
])
def test_unknown_alias_connection(self):
""" Call to connect() should not fire the pre/post signals for unknown db alias. """
def test_already_connected(self):
""" Repeat call to connect() should not fire the pre/post signals. """
connect(alias='default', host='mongo')
self.assertEqual(self.get_signal_output(connect), [])
class DocumentSignalTests(BaseSignalTests):
"""
Testing signals before/after saving and deleting.
"""
def setUp(self):
connect(alias='default', host='mongo')
register_db('mongoenginetest')
@six.python_2_unicode_compatible
class Author(Document):
name = StringField()
def __str__(self):
return str(self.name)
@classmethod
def pre_init(cls, sender, document, *args, **kwargs):
signal_output.append('pre_init signal, %s' % cls.__name__)
signal_output.append(str(kwargs['values']))
@classmethod
def post_init(cls, sender, document, **kwargs):
signal_output.append('post_init signal, %s' % document)
@classmethod
def pre_save(cls, sender, document, **kwargs):
signal_output.append('pre_save signal, %s' % document)
@classmethod
def post_save(cls, sender, document, **kwargs):
signal_output.append('post_save signal, %s' % document)
if 'created' in kwargs:
if kwargs['created']:
signal_output.append('Is created')
else:
signal_output.append('Is updated')
@classmethod
def pre_delete(cls, sender, document, **kwargs):
signal_output.append('pre_delete signal, %s' % document)
@classmethod
def post_delete(cls, sender, document, **kwargs):
signal_output.append('post_delete signal, %s' % document)
@classmethod
def pre_bulk_insert(cls, sender, documents, **kwargs):
signal_output.append('pre_bulk_insert signal, %s' % documents)
@classmethod
def post_bulk_insert(cls, sender, documents, **kwargs):
signal_output.append('post_bulk_insert signal, %s' % documents)
if kwargs.get('loaded', False):
signal_output.append('Is loaded')
else:
signal_output.append('Not loaded')
self.Author = Author
@six.python_2_unicode_compatible
class Another(Document):
name = StringField()
def __str__(self):
return str(self.name)
@classmethod
def pre_init(cls, sender, document, **kwargs):
signal_output.append(
'pre_init Another signal, %s' % cls.__name__)
signal_output.append(str(kwargs['values']))
@classmethod
def post_init(cls, sender, document, **kwargs):
signal_output.append('post_init Another signal, %s' % document)
@classmethod
def pre_save(cls, sender, document, **kwargs):
signal_output.append('pre_save Another signal, %s' % document)
@classmethod
def post_save(cls, sender, document, **kwargs):
signal_output.append('post_save Another signal, %s' % document)
if 'created' in kwargs:
if kwargs['created']:
signal_output.append('Is created')
else:
signal_output.append('Is updated')
@classmethod
def pre_delete(cls, sender, document, **kwargs):
signal_output.append('pre_delete Another signal, %s' % document)
@classmethod
def post_delete(cls, sender, document, **kwargs):
signal_output.append(
'post_delete Another signal, %s' % document)
self.Another = Another
# Save up the number of connected signals so that we can check at
# the end that all the signals we register get properly unregistered
self.pre_signals = (
len(signals.pre_init.receivers),
len(signals.post_init.receivers),
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
len(signals.pre_bulk_insert.receivers),
len(signals.post_bulk_insert.receivers),
)
signals.pre_init.connect(Author.pre_init, sender=Author)
signals.post_init.connect(Author.post_init, sender=Author)
signals.pre_save.connect(Author.pre_save, sender=Author)
signals.post_save.connect(Author.post_save, sender=Author)
signals.pre_delete.connect(Author.pre_delete, sender=Author)
signals.post_delete.connect(Author.post_delete, sender=Author)
signals.pre_bulk_insert.connect(Author.pre_bulk_insert, sender=Author)
signals.post_bulk_insert.connect(Author.post_bulk_insert, sender=Author)
signals.pre_init.connect(Another.pre_init, sender=Another)
signals.post_init.connect(Another.post_init, sender=Another)
signals.pre_save.connect(Another.pre_save, sender=Another)
signals.post_save.connect(Another.post_save, sender=Another)
signals.pre_delete.connect(Another.pre_delete, sender=Another)
signals.post_delete.connect(Another.post_delete, sender=Another)
def tearDown(self):
signals.pre_init.disconnect(self.Author.pre_init)
signals.post_init.disconnect(self.Author.post_init)
signals.post_delete.disconnect(self.Author.post_delete)
signals.pre_delete.disconnect(self.Author.pre_delete)
signals.post_save.disconnect(self.Author.post_save)
signals.pre_save.disconnect(self.Author.pre_save)
signals.pre_bulk_insert.disconnect(self.Author.pre_bulk_insert)
signals.post_bulk_insert.disconnect(self.Author.post_bulk_insert)
signals.pre_init.disconnect(self.Another.pre_init)
signals.post_init.disconnect(self.Another.post_init)
signals.post_delete.disconnect(self.Another.post_delete)
signals.pre_delete.disconnect(self.Another.pre_delete)
signals.post_save.disconnect(self.Another.post_save)
signals.pre_save.disconnect(self.Another.pre_save)
# Check that all our signals got disconnected properly.
post_signals = (
len(signals.pre_init.receivers),
len(signals.post_init.receivers),
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
len(signals.pre_bulk_insert.receivers),
len(signals.post_bulk_insert.receivers),
)
self.assertEqual(self.pre_signals, post_signals)
def test_model_signals(self):
""" Model saves should throw some signals. """
def create_author():
self.Author(name='Bill Shakespeare')
def bulk_create_author_with_load():
a1 = self.Author(name='Bill Shakespeare')
self.Author.objects.insert([a1], load_bulk=True)
def bulk_create_author_without_load():
a1 = self.Author(name='Bill Shakespeare')
self.Author.objects.insert([a1], load_bulk=False)
self.assertEqual(self.get_signal_output(create_author), [
"pre_init signal, Author",
"{'name': 'Bill Shakespeare'}",
"post_init signal, Bill Shakespeare",
])
a1 = self.Author(name='Bill Shakespeare')
self.assertEqual(self.get_signal_output(a1.save), [
"pre_save signal, Bill Shakespeare",
"post_save signal, Bill Shakespeare",
"Is created"
])
a1.reload()
a1.name = 'William Shakespeare'
self.assertEqual(self.get_signal_output(a1.save), [
"pre_save signal, William Shakespeare",
"post_save signal, William Shakespeare",
"Is updated"
])
self.assertEqual(self.get_signal_output(a1.delete), [
'pre_delete signal, William Shakespeare',
'post_delete signal, William Shakespeare',
])
signal_output = self.get_signal_output(bulk_create_author_with_load)
        # The output of this signal is not entirely deterministic. The reloaded
        # object will have an object ID. Hence, we only check part of the
        # output.
self.assertEqual(
signal_output[3],
"pre_bulk_insert signal, [<Author: Bill Shakespeare>]")
self.assertEqual(
signal_output[-2:],
["post_bulk_insert signal, [<Author: Bill Shakespeare>]",
"Is loaded"])
self.assertEqual(
self.get_signal_output(bulk_create_author_without_load),
["pre_init signal, Author",
"{'name': 'Bill Shakespeare'}",
"post_init signal, Bill Shakespeare",
"pre_bulk_insert signal, [<Author: Bill Shakespeare>]",
"post_bulk_insert signal, [<Author: Bill Shakespeare>]",
"Not loaded"])
self.Author.objects.delete()
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import absolute_import, division, print_function
import collections
import itertools as it
import operator
import warnings
import numpy as np
import pandas as pd
from .core import DataFrame, Series, aca, map_partitions, no_default
from .shuffle import shuffle
from .utils import make_meta, insert_meta_param_description, raise_on_meta_error
from ..base import tokenize
from ..utils import derived_from, M, funcname
def _maybe_slice(grouped, columns):
"""
Slice columns if grouped is pd.DataFrameGroupBy
"""
if isinstance(grouped, pd.core.groupby.DataFrameGroupBy):
if columns is not None:
columns = columns if isinstance(columns, str) else list(columns)
return grouped[columns]
return grouped
def _groupby_slice_apply(df, grouper, key, func):
g = df.groupby(grouper)
if key:
g = g[key]
return g.apply(func)
def _groupby_get_group(df, by_key, get_key, columns):
# SeriesGroupBy may pass df which includes group key
grouped = df.groupby(by_key)
if get_key in grouped.groups:
if isinstance(df, pd.DataFrame):
grouped = grouped[columns]
return grouped.get_group(get_key)
else:
        # create an empty DataFrame/Series with the same dtype as the
        # original
if isinstance(df, pd.DataFrame):
# may be SeriesGroupBy
df = df[columns]
return df.iloc[0:0]
###############################################################
# Aggregation
###############################################################
def _groupby_aggregate(df, aggfunc=None, levels=None):
return aggfunc(df.groupby(level=levels))
def _apply_chunk(df, index, func, columns):
if isinstance(df, pd.Series) or columns is None:
return func(df.groupby(index))
else:
columns = columns if isinstance(columns, str) else list(columns)
return func(df.groupby(index)[columns])
def _var_chunk(df, index):
if isinstance(df, pd.Series):
df = df.to_frame()
g = df.groupby(index)
x = g.sum()
x2 = g.agg(lambda x: (x**2).sum()).rename(columns=lambda c: c + '-x2')
n = g.count().rename(columns=lambda c: c + '-count')
return pd.concat([x, x2, n], axis=1)
def _var_combine(g):
return g.groupby(level=0).sum()
def _var_agg(g, ddof):
g = g.groupby(level=0).sum()
nc = len(g.columns)
x = g[g.columns[:nc // 3]]
x2 = g[g.columns[nc // 3:2 * nc // 3]].rename(columns=lambda c: c[:-3])
n = g[g.columns[-nc // 3:]].rename(columns=lambda c: c[:-6])
# TODO: replace with _finalize_var?
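    # Sum-of-squares identity: sum((x - mean)**2) == sum(x**2) - sum(x)**2 / n,
    # so dividing by (n - ddof) below yields the variance; the divisor is
    # clamped at zero and groups where n == ddof become NaN.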
result = x2 - x ** 2 / n
div = (n - ddof)
div[div < 0] = 0
result /= div
result[(n - ddof) == 0] = np.nan
assert isinstance(result, pd.DataFrame)
return result
###############################################################
# nunique
###############################################################
def _nunique_df_chunk(df, index):
    # group and drop duplicates, then flatten the index to the group keys so
    # the (possibly duplicated) index can be grouped on again in the reduce
    # step
grouped = df.groupby(index).apply(pd.DataFrame.drop_duplicates)
grouped.index = grouped.index.get_level_values(level=0)
return grouped
def _nunique_df_combine(df):
result = df.groupby(level=0).apply(pd.DataFrame.drop_duplicates)
result.index = result.index.get_level_values(level=0)
return result
def _nunique_df_aggregate(df, name):
return df.groupby(level=0)[name].nunique()
def _nunique_series_chunk(df, index):
assert isinstance(df, pd.Series)
if isinstance(index, np.ndarray):
assert len(index) == len(df)
index = pd.Series(index, index=df.index)
grouped = pd.concat([df, index], axis=1).drop_duplicates()
return grouped
def _nunique_series_combine(df):
return df.drop_duplicates()
def _nunique_series_aggregate(df):
return df.groupby(df.columns[1])[df.columns[0]].nunique()
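# Rough shape of the nunique computation assembled from the helpers above
# (illustrative only; the real wiring happens in SeriesGroupBy.nunique):
#   chunk:     per-partition groupby + drop_duplicates -> candidate uniques
#   combine:   drop duplicates again within each group across partitions
#   aggregate: count the remaining distinct values per group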
###############################################################
# Aggregate support
#
# Aggregate is implemented as:
#
# 1. group-by-aggregate all partitions into intermediate values
# 2. collect all partitions into a single partition
# 3. group-by-aggregate the result into intermediate values
# 4. transform all intermediate values into the result
#
# In Step 1 and 3 the dataframe is grouped on the same columns.
#
###############################################################
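# Illustrative, in-memory sketch of those four steps for a spec on column 'b'
# grouped by column 'a' (names are hypothetical; the real pipeline is built as
# a dask graph via ``aca`` in ``_GroupBy.aggregate`` below):
#
#   parts = [_groupby_apply_funcs(p, 'a', funcs=chunk_funcs)
#            for p in partitions]                                           # 1.
#   combined = pd.concat(parts)                                             # 2.
#   agged = _groupby_apply_funcs(combined, funcs=aggregate_funcs, level=0)  # 3.
#   result = _agg_finalize(agged, finalizers)                               # 4.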
def _make_agg_id(func, column):
return '{!s}-{!s}-{}'.format(func, column, tokenize(func, column))
def _normalize_spec(spec, non_group_columns):
"""
Return a list of ``(result_column, func, input_column)`` tuples.
Spec can be
- a function
- a list of functions
- a dictionary that maps input-columns to functions
    - a dictionary that maps input-columns to lists of functions
    - a dictionary that maps input-columns to dictionaries that map
      output-columns to functions.
The non-group columns are a list of all column names that are not used in
the groupby operation.
    Usually, the result columns are multi-level names, returned as tuples.
    If only a single function is supplied, or a dictionary mapping columns
    to single functions, simple names are returned as strings (see the first
two examples below).
Examples
--------
>>> _normalize_spec('mean', ['a', 'b', 'c'])
[('a', 'mean', 'a'), ('b', 'mean', 'b'), ('c', 'mean', 'c')]
>>> spec = collections.OrderedDict([('a', 'mean'), ('b', 'count')])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
[('a', 'mean', 'a'), ('b', 'count', 'b')]
>>> _normalize_spec(['var', 'mean'], ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'var'), 'var', 'a'), (('a', 'mean'), 'mean', 'a'), \
(('b', 'var'), 'var', 'b'), (('b', 'mean'), 'mean', 'b'), \
(('c', 'var'), 'var', 'c'), (('c', 'mean'), 'mean', 'c')]
>>> spec = collections.OrderedDict([('a', 'mean'), ('b', ['sum', 'count'])])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'mean'), 'mean', 'a'), (('b', 'sum'), 'sum', 'b'), \
(('b', 'count'), 'count', 'b')]
>>> spec = collections.OrderedDict()
>>> spec['a'] = ['mean', 'size']
>>> spec['b'] = collections.OrderedDict([('e', 'count'), ('f', 'var')])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'mean'), 'mean', 'a'), (('a', 'size'), 'size', 'a'), \
(('b', 'e'), 'count', 'b'), (('b', 'f'), 'var', 'b')]
"""
if not isinstance(spec, dict):
spec = collections.OrderedDict(zip(non_group_columns, it.repeat(spec)))
res = []
if isinstance(spec, dict):
for input_column, subspec in spec.items():
if isinstance(subspec, dict):
res.extend(((input_column, result_column), func, input_column)
for result_column, func in subspec.items())
else:
if not isinstance(subspec, list):
subspec = [subspec]
res.extend(((input_column, funcname(func)), func, input_column)
for func in subspec)
else:
raise ValueError("unsupported agg spec of type {}".format(type(spec)))
compounds = (list, tuple, dict)
use_flat_columns = not any(isinstance(subspec, compounds)
for subspec in spec.values())
if use_flat_columns:
res = [(input_col, func, input_col) for (_, func, input_col) in res]
return res
def _build_agg_args(spec):
"""
Create transformation functions for a normalized aggregate spec.
Parameters
----------
spec: a list of (result-column, aggregation-function, input-column) triples.
        To work with all argument forms understood by pandas, use
        ``_normalize_spec`` to normalize the argument before passing it on to
``_build_agg_args``.
Returns
-------
chunk_funcs: a list of (intermediate-column, function, keyword) triples
that are applied on grouped chunks of the initial dataframe.
    agg_funcs: a list of (intermediate-column, function, keyword) triples that
        are applied on the grouped concatenation of the preprocessed chunks.
finalizers: a list of (result-column, function, keyword) triples that are
applied after the ``agg_funcs``. They are used to create final results
from intermediate representations.
"""
known_np_funcs = {np.min: 'min', np.max: 'max'}
chunks = {}
aggs = {}
finalizers = []
for (result_column, func, input_column) in spec:
func = funcname(known_np_funcs.get(func, func))
impls = _build_agg_args_single(result_column, func, input_column)
        # overwrite existing result-columns, generate intermediates only once
chunks.update((spec[0], spec) for spec in impls['chunk_funcs'])
aggs.update((spec[0], spec) for spec in impls['aggregate_funcs'])
finalizers.append(impls['finalizer'])
chunks = sorted(chunks.values())
aggs = sorted(aggs.values())
return chunks, aggs, finalizers
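# Usage sketch (column names are illustrative):
#   spec = _normalize_spec({'b': 'mean'}, ['b', 'c'])
#   chunk_funcs, agg_funcs, finalizers = _build_agg_args(spec)
# Here the chunk funcs compute the per-partition sum and count of 'b', the agg
# funcs sum those intermediates, and the finalizer divides sum by count.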
def _build_agg_args_single(result_column, func, input_column):
simple_impl = {
'sum': (M.sum, M.sum),
'min': (M.min, M.min),
'max': (M.max, M.max),
'count': (M.count, M.sum),
'size': (M.size, M.sum),
}
if func in simple_impl.keys():
return _build_agg_args_simple(result_column, func, input_column,
simple_impl[func])
elif func == 'var':
return _build_agg_args_var(result_column, func, input_column)
elif func == 'std':
return _build_agg_args_std(result_column, func, input_column)
elif func == 'mean':
return _build_agg_args_mean(result_column, func, input_column)
else:
raise ValueError("unknown aggregate {}".format(func))
def _build_agg_args_simple(result_column, func, input_column, impl_pair):
intermediate = _make_agg_id(func, input_column)
chunk_impl, agg_impl = impl_pair
return dict(
chunk_funcs=[(intermediate, _apply_func_to_column,
dict(column=input_column, func=chunk_impl))],
aggregate_funcs=[(intermediate, _apply_func_to_column,
dict(column=intermediate, func=agg_impl))],
finalizer=(result_column, operator.itemgetter(intermediate), dict()),
)
def _build_agg_args_var(result_column, func, input_column):
int_sum = _make_agg_id('sum', input_column)
int_sum2 = _make_agg_id('sum2', input_column)
int_count = _make_agg_id('count', input_column)
return dict(
chunk_funcs=[
(int_sum, _apply_func_to_column,
dict(column=input_column, func=M.sum)),
(int_count, _apply_func_to_column,
dict(column=input_column, func=M.count)),
(int_sum2, _compute_sum_of_squares,
dict(column=input_column)),
],
aggregate_funcs=[
(col, _apply_func_to_column, dict(column=col, func=M.sum))
for col in (int_sum, int_count, int_sum2)
],
finalizer=(result_column, _finalize_var,
dict(sum_column=int_sum, count_column=int_count,
sum2_column=int_sum2)),
)
def _build_agg_args_std(result_column, func, input_column):
impls = _build_agg_args_var(result_column, func, input_column)
result_column, _, kwargs = impls['finalizer']
impls['finalizer'] = (result_column, _finalize_std, kwargs)
return impls
def _build_agg_args_mean(result_column, func, input_column):
int_sum = _make_agg_id('sum', input_column)
int_count = _make_agg_id('count', input_column)
return dict(
chunk_funcs=[
(int_sum, _apply_func_to_column,
dict(column=input_column, func=M.sum)),
(int_count, _apply_func_to_column,
dict(column=input_column, func=M.count)),
],
aggregate_funcs=[
(col, _apply_func_to_column, dict(column=col, func=M.sum))
for col in (int_sum, int_count)
],
finalizer=(result_column, _finalize_mean,
dict(sum_column=int_sum, count_column=int_count)),
)
def _groupby_apply_funcs(df, *index, **kwargs):
"""
Group a dataframe and apply multiple aggregation functions.
Parameters
----------
df: pandas.DataFrame
The dataframe to work on.
index: list of groupers
If given, they are added to the keyword arguments as the ``by``
argument.
    funcs: list of (result-column, function, keyword-argument) triples
The list of functions that are applied on the grouped data frame.
Has to be passed as a keyword argument.
kwargs:
        All keyword arguments except ``funcs`` are passed verbatim to the
        groupby operation of the dataframe.
Returns
-------
aggregated:
the aggregated dataframe.
"""
if len(index):
kwargs.update(by=list(index))
funcs = kwargs.pop('funcs')
grouped = df.groupby(**kwargs)
result = collections.OrderedDict()
for result_column, func, func_kwargs in funcs:
result[result_column] = func(grouped, **func_kwargs)
return pd.DataFrame(result)
def _compute_sum_of_squares(grouped, column):
base = grouped[column] if column is not None else grouped
return base.apply(lambda x: (x ** 2).sum())
def _agg_finalize(df, funcs):
result = collections.OrderedDict()
for result_column, func, kwargs in funcs:
result[result_column] = func(df, **kwargs)
return pd.DataFrame(result)
def _apply_func_to_column(df_like, column, func):
if column is None:
return func(df_like)
return func(df_like[column])
def _finalize_mean(df, sum_column, count_column):
return df[sum_column] / df[count_column]
def _finalize_var(df, count_column, sum_column, sum2_column, ddof=1):
n = df[count_column]
x = df[sum_column]
x2 = df[sum2_column]
result = x2 - x ** 2 / n
div = (n - ddof)
div[div < 0] = 0
result /= div
result[(n - ddof) == 0] = np.nan
return result
def _finalize_std(df, count_column, sum_column, sum2_column, ddof=1):
result = _finalize_var(df, count_column, sum_column, sum2_column, ddof)
return np.sqrt(result)
def _normalize_index(df, index):
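    # Replace Series/DataFrame group keys that are unmodified slices of ``df``
    # with their column name(s), so later steps (see the NOTE in
    # ``_GroupBy.aggregate``) can treat them as plain column groupers.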
if not isinstance(df, DataFrame):
return index
elif isinstance(index, list):
return [_normalize_index(df, col) for col in index]
elif (isinstance(index, Series) and index.name in df.columns and
index._name == df[index.name]._name):
return index.name
elif (isinstance(index, DataFrame) and
set(index.columns).issubset(df.columns) and
index._name == df[index.columns]._name):
return list(index.columns)
else:
return index
class _GroupBy(object):
""" Superclass for DataFrameGroupBy and SeriesGroupBy
Parameters
----------
obj: DataFrame or Series
DataFrame or Series to be grouped
index: str, list or Series
The key for grouping
kwargs: dict
Other keywords passed to groupby
"""
def __init__(self, df, index=None, slice=None, **kwargs):
assert isinstance(df, (DataFrame, Series))
self.obj = df
# grouping key passed via groupby method
self.index = _normalize_index(df, index)
# slicing key applied to _GroupBy instance
self._slice = slice
self.kwargs = kwargs
if isinstance(index, Series) and df.divisions != index.divisions:
msg = ("The Series and index of the groupby"
" must have the same divisions.")
raise NotImplementedError(msg)
if self._is_grouped_by_sliced_column(self.obj, index):
# check whether given Series is taken from given df and unchanged.
# If any operations are performed, _name will be changed to
# e.g. "elemwise-xxxx"
            # if the group key (index) is a Series sliced from the DataFrame,
            # the meta groupby must be built the same way (group the meta
            # frame by its own sliced column); otherwise the group key is
            # treated as a separate column
self._meta = self.obj._meta.groupby(self.obj._meta[index.name])
elif isinstance(self.index, Series):
self._meta = self.obj._meta.groupby(self.index._meta)
else:
self._meta = self.obj._meta.groupby(self.index)
def _is_grouped_by_sliced_column(self, df, index):
"""
Return whether index is a Series sliced from df
"""
if isinstance(df, Series):
return False
if (isinstance(index, Series) and index._name in df.columns and
index._name == df[index.name]._name):
return True
if (isinstance(index, DataFrame) and
set(index.columns).issubset(df.columns) and
index._name == df[index.columns]._name):
index = list(index.columns)
return True
return False
@property
def _meta_nonempty(self):
"""
Return a pd.DataFrameGroupBy / pd.SeriesGroupBy which contains sample data.
"""
sample = self.obj._meta_nonempty
if isinstance(self.index, Series):
if self._is_grouped_by_sliced_column(self.obj, self.index):
grouped = sample.groupby(sample[self.index.name])
else:
grouped = sample.groupby(self.index._meta_nonempty)
else:
grouped = sample.groupby(self.index)
return _maybe_slice(grouped, self._slice)
def _aca_agg(self, token, func, aggfunc=None, split_every=None):
if aggfunc is None:
aggfunc = func
meta = func(self._meta)
columns = meta.name if isinstance(meta, pd.Series) else meta.columns
token = self._token_prefix + token
if isinstance(self.index, (tuple, list)) and len(self.index) > 1:
levels = list(range(len(self.index)))
else:
levels = 0
return aca([self.obj, self.index, func, columns],
chunk=_apply_chunk, aggregate=_groupby_aggregate,
meta=meta, token=token, split_every=split_every,
aggregate_kwargs=dict(aggfunc=aggfunc, levels=levels))
@derived_from(pd.core.groupby.GroupBy)
def sum(self, split_every=None):
return self._aca_agg(token='sum', func=M.sum, split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def min(self, split_every=None):
return self._aca_agg(token='min', func=M.min, split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def max(self, split_every=None):
return self._aca_agg(token='max', func=M.max, split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def count(self, split_every=None):
return self._aca_agg(token='count', func=M.count,
aggfunc=M.sum, split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def mean(self, split_every=None):
return self.sum(split_every=split_every) / self.count(split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def size(self, split_every=None):
return self._aca_agg(token='size', func=M.size, aggfunc=M.sum,
split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def var(self, ddof=1, split_every=None):
result = aca([self.obj, self.index], chunk=_var_chunk,
aggregate=_var_agg, combine=_var_combine,
token=self._token_prefix + 'var',
aggregate_kwargs={'ddof': ddof}, split_every=split_every)
if isinstance(self.obj, Series):
result = result[result.columns[0]]
if self._slice:
result = result[self._slice]
return result
@derived_from(pd.core.groupby.GroupBy)
def std(self, ddof=1, split_every=None):
v = self.var(ddof, split_every=split_every)
result = map_partitions(np.sqrt, v, meta=v)
return result
@derived_from(pd.core.groupby.GroupBy)
def get_group(self, key):
token = self._token_prefix + 'get_group'
meta = self._meta.obj
if isinstance(meta, pd.DataFrame) and self._slice is not None:
meta = meta[self._slice]
columns = meta.columns if isinstance(meta, pd.DataFrame) else meta.name
return map_partitions(_groupby_get_group, self.obj, self.index, key,
columns, meta=meta, token=token)
def aggregate(self, arg, split_every):
if isinstance(self.obj, DataFrame):
if isinstance(self.index, tuple) or np.isscalar(self.index):
group_columns = {self.index}
elif isinstance(self.index, list):
group_columns = {i for i in self.index
if isinstance(i, tuple) or np.isscalar(i)}
else:
group_columns = set()
# NOTE: this step relies on the index normalization to replace
# series with their name in an index.
non_group_columns = [col for col in self.obj.columns
if col not in group_columns]
spec = _normalize_spec(arg, non_group_columns)
elif isinstance(self.obj, Series):
# implementation detail: if self.obj is a series, a pseudo column
# None is used to denote the series itself. This pseudo column is
# removed from the result columns before passing the spec along.
spec = _normalize_spec({None: arg}, [])
spec = [(result_column, func, input_column)
for ((_, result_column), func, input_column) in spec]
else:
raise ValueError("aggregate on unknown object {}".format(self.obj))
chunk_funcs, aggregate_funcs, finalizers = _build_agg_args(spec)
if isinstance(self.index, (tuple, list)) and len(self.index) > 1:
levels = list(range(len(self.index)))
else:
levels = 0
# apply the transformations to determine the meta object
meta_groupby = pd.Series([], dtype=bool, index=self.obj._meta.index)
meta_stage1 = _groupby_apply_funcs(self.obj._meta, funcs=chunk_funcs,
by=meta_groupby)
meta_stage2 = _groupby_apply_funcs(meta_stage1, funcs=aggregate_funcs,
level=0)
meta = _agg_finalize(meta_stage2, finalizers)
if not isinstance(self.index, list):
chunk_args = [self.obj, self.index]
else:
chunk_args = [self.obj] + self.index
obj = aca(chunk_args,
chunk=_groupby_apply_funcs,
chunk_kwargs=dict(funcs=chunk_funcs),
aggregate=_groupby_apply_funcs,
aggregate_kwargs=dict(funcs=aggregate_funcs, level=levels),
combine=_groupby_apply_funcs,
combine_kwargs=dict(funcs=aggregate_funcs, level=levels),
meta=meta, token='aggregate', split_every=split_every)
return map_partitions(_agg_finalize, obj, meta=meta,
token='aggregate-finalize', funcs=finalizers)
@insert_meta_param_description(pad=12)
def apply(self, func, meta=no_default, columns=no_default):
""" Parallel version of pandas GroupBy.apply
This mimics the pandas version except for the following:
1. The user should provide output metadata.
2. If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
Parameters
----------
func: function
Function to apply
$META
columns: list, scalar or None
            Deprecated, use `meta` instead. If a list is given, the result is a
            DataFrame whose columns are the specified list. Otherwise, the
            result is a Series whose name is the given scalar, or None (no
            name). If neither keyword is given, dask tries to infer the result
            type from the beginning of the data. This inference may take some
            time and lead to unexpected results.
Returns
-------
applied : Series or DataFrame depending on columns keyword
"""
if columns is not no_default:
warnings.warn("`columns` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(columns, (pd.DataFrame, pd.Series)):
meta = columns
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
with raise_on_meta_error("groupby.apply({0})".format(funcname(func))):
meta = self._meta_nonempty.apply(func)
meta = make_meta(meta)
df = self.obj
if isinstance(self.index, DataFrame): # add index columns to dataframe
df2 = df.assign(**{'_index_' + c: self.index[c]
for c in self.index.columns})
index = self.index
elif isinstance(self.index, Series):
df2 = df.assign(_index=self.index)
index = self.index
else:
df2 = df
index = df[self.index]
df3 = shuffle(df2, index, **self.kwargs) # shuffle dataframe and index
if isinstance(self.index, DataFrame): # extract index from dataframe
cols = ['_index_' + c for c in self.index.columns]
index2 = df3[cols]
df4 = df3.drop(cols, axis=1, dtype=meta.columns.dtype if
isinstance(meta, pd.DataFrame) else None)
elif isinstance(self.index, Series):
index2 = df3['_index']
index2.name = self.index.name
            df4 = df3.drop('_index', axis=1, dtype=meta.columns.dtype if
                           isinstance(meta, pd.DataFrame) else None)
else:
df4 = df3
index2 = self.index
# Perform embarrassingly parallel groupby-apply
df5 = map_partitions(_groupby_slice_apply, df4, index2,
self._slice, func, meta=meta)
return df5
class DataFrameGroupBy(_GroupBy):
_token_prefix = 'dataframe-groupby-'
def __init__(self, df, index=None, slice=None, **kwargs):
if not kwargs.get('as_index', True):
msg = ("The keyword argument `as_index=False` is not supported in "
"dask.dataframe.groupby")
raise NotImplementedError(msg)
super(DataFrameGroupBy, self).__init__(df, index=index,
slice=slice, **kwargs)
def __getitem__(self, key):
if isinstance(key, list):
g = DataFrameGroupBy(self.obj, index=self.index,
slice=key, **self.kwargs)
else:
g = SeriesGroupBy(self.obj, index=self.index,
slice=key, **self.kwargs)
# error is raised from pandas
g._meta = g._meta[key]
return g
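# Illustrative usage sketch (column names are assumptions): indexing with a
# list keeps a DataFrameGroupBy, while a scalar key yields a SeriesGroupBy,
# e.g.:
#
#     ddf.groupby('g')[['x', 'y']].sum()   # DataFrameGroupBy -> DataFrame
#     ddf.groupby('g')['x'].sum()          # SeriesGroupBy -> Series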
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
list(filter(pd.compat.isidentifier, self.obj.columns))))
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(e)
@derived_from(pd.core.groupby.DataFrameGroupBy)
def aggregate(self, arg, split_every=None):
if arg == 'size':
return self.size()
return super(DataFrameGroupBy, self).aggregate(arg, split_every=split_every)
@derived_from(pd.core.groupby.DataFrameGroupBy)
def agg(self, arg, split_every=None):
return self.aggregate(arg, split_every=split_every)
class SeriesGroupBy(_GroupBy):
_token_prefix = 'series-groupby-'
def __init__(self, df, index, slice=None, **kwargs):
# raise pandas-compat error message
if isinstance(df, Series):
# When obj is Series, index must be Series
if not isinstance(index, Series):
if isinstance(index, list):
if len(index) == 0:
raise ValueError("No group keys passed!")
msg = "Grouper for '{0}' not 1-dimensional"
raise ValueError(msg.format(index[0]))
# raise error from pandas
df._meta.groupby(index)
super(SeriesGroupBy, self).__init__(df, index=index,
slice=slice, **kwargs)
def nunique(self, split_every=None):
name = self._meta.obj.name
meta = pd.Series([], dtype='int64',
index=pd.Index([], dtype=self._meta.obj.dtype),
name=name)
if isinstance(self.obj, DataFrame):
return aca([self.obj, self.index],
chunk=_nunique_df_chunk,
aggregate=_nunique_df_aggregate,
combine=_nunique_df_combine,
meta=meta, token='series-groupby-nunique',
aggregate_kwargs={'name': name},
split_every=split_every)
else:
return aca([self.obj, self.index],
chunk=_nunique_series_chunk,
aggregate=_nunique_series_aggregate,
combine=_nunique_series_combine,
meta=meta, token='series-groupby-nunique',
split_every=split_every)
@derived_from(pd.core.groupby.SeriesGroupBy)
def aggregate(self, arg, split_every=None):
# short-circuit 'simple' aggregations
if (
not isinstance(arg, (list, dict)) and
arg in {'sum', 'mean', 'var', 'size', 'std', 'count'}
):
return getattr(self, arg)(split_every=split_every)
return super(SeriesGroupBy, self).aggregate(arg, split_every=split_every)
@derived_from(pd.core.groupby.SeriesGroupBy)
def agg(self, arg, split_every=None):
return self.aggregate(arg, split_every=split_every)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts checkpoint variables into Const ops in a standalone GraphDef file.
This script is designed to take a GraphDef proto, a SaverDef proto, and a set of
variable values stored in a checkpoint file, and output a GraphDef with all of
the variable ops converted into const ops containing the values of the
variables.
It's useful to do this when we need to load a single file in C++, especially in
environments like mobile or embedded where we may not have access to the
RestoreTensor ops and file loading calls that they rely on.
An example of command-line usage is:
bazel build tensorflow/python/tools:freeze_graph && \
bazel-bin/tensorflow/python/tools/freeze_graph \
--input_graph=some_graph_def.pb \
--input_checkpoint=model.ckpt-8361242 \
--output_graph=/tmp/frozen_graph.pb --output_node_names=softmax
You can also look at freeze_graph_test.py for an example of how to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import re
import sys
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import saved_model_utils
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
def _has_no_variables(sess):
"""Determines if the graph has any variables.
Args:
sess: TensorFlow Session.
Returns:
Bool.
"""
for op in sess.graph.get_operations():
if op.type.startswith("Variable") or op.type.endswith("VariableOp"):
return False
return True
def freeze_graph_with_def_protos(input_graph_def,
input_saver_def,
input_checkpoint,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph,
clear_devices,
initializer_nodes,
variable_names_whitelist="",
variable_names_blacklist="",
input_meta_graph_def=None,
input_saved_model_dir=None,
saved_model_tags=None,
checkpoint_version=saver_pb2.SaverDef.V2):
"""Converts all variables in a graph and checkpoint into constants.
Args:
input_graph_def: A `GraphDef`.
input_saver_def: A `SaverDef` (optional).
input_checkpoint: The prefix of a V1 or V2 checkpoint, with V2 taking
priority. Typically the result of `Saver.save()` or that of
`tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or
V1/V2.
output_node_names: The name(s) of the output nodes, comma separated.
restore_op_name: Unused.
filename_tensor_name: Unused.
output_graph: String where to write the frozen `GraphDef`.
clear_devices: A Bool whether to remove device specifications.
initializer_nodes: Comma separated string of initializer nodes to run before
freezing.
variable_names_whitelist: The set of variable names to convert (optional, by
default, all variables are converted).
variable_names_blacklist: The set of variable names to omit converting
to constants (optional).
input_meta_graph_def: A `MetaGraphDef` (optional).
input_saved_model_dir: Path to the dir with TensorFlow 'SavedModel' file
and variables (optional).
saved_model_tags: Group of comma separated tag(s) of the MetaGraphDef to
load, in string format (optional).
checkpoint_version: Tensorflow variable file format (saver_pb2.SaverDef.V1
or saver_pb2.SaverDef.V2)
Returns:
Location of the output_graph_def.
"""
del restore_op_name, filename_tensor_name # Unused by updated loading code.
# 'input_checkpoint' may be a prefix if we're using Saver V2 format
if (not input_saved_model_dir and
not checkpoint_management.checkpoint_exists(input_checkpoint)):
print("Input checkpoint '" + input_checkpoint + "' doesn't exist!")
return -1
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
# Remove all the explicit device specifications for this node. This helps to
# make the graph more portable.
if clear_devices:
if input_meta_graph_def:
for node in input_meta_graph_def.graph_def.node:
node.device = ""
elif input_graph_def:
for node in input_graph_def.node:
node.device = ""
if input_graph_def:
_ = importer.import_graph_def(input_graph_def, name="")
with session.Session() as sess:
if input_saver_def:
saver = saver_lib.Saver(
saver_def=input_saver_def, write_version=checkpoint_version)
saver.restore(sess, input_checkpoint)
elif input_meta_graph_def:
restorer = saver_lib.import_meta_graph(
input_meta_graph_def, clear_devices=True)
restorer.restore(sess, input_checkpoint)
if initializer_nodes:
sess.run(initializer_nodes.replace(" ", "").split(","))
elif input_saved_model_dir:
if saved_model_tags is None:
saved_model_tags = []
loader.load(sess, saved_model_tags, input_saved_model_dir)
else:
var_list = {}
reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
var_to_shape_map = reader.get_variable_to_shape_map()
# List of all partition variables. Because the condition is heuristic
# based, the list could include false positives.
all_partition_variable_names = [
tensor.name.split(":")[0]
for op in sess.graph.get_operations()
for tensor in op.values()
if re.search(r"/part_\d+/", tensor.name)
]
has_partition_var = False
for key in var_to_shape_map:
try:
tensor = sess.graph.get_tensor_by_name(key + ":0")
if any(key in name for name in all_partition_variable_names):
has_partition_var = True
except KeyError:
# This tensor doesn't exist in the graph (for example it's
# 'global_step' or a similar housekeeping element) so skip it.
continue
var_list[key] = tensor
try:
saver = saver_lib.Saver(
var_list=var_list, write_version=checkpoint_version)
except TypeError as e:
# `var_list` is required to be a map of variable names to Variable
# tensors. Partition variables are Identity tensors that cannot be
# handled by Saver.
if has_partition_var:
print("Models containing partition variables cannot be converted "
"from checkpoint files. Please pass in a SavedModel using "
"the flag --input_saved_model_dir.")
return -1
# Models that have been frozen previously do not contain Variables.
elif _has_no_variables(sess):
print("No variables were found in this model. It is likely the model "
"was frozen previously. You cannot freeze a graph twice.")
return 0
else:
raise e
saver.restore(sess, input_checkpoint)
if initializer_nodes:
sess.run(initializer_nodes.replace(" ", "").split(","))
variable_names_whitelist = (
variable_names_whitelist.replace(" ", "").split(",")
if variable_names_whitelist else None)
variable_names_blacklist = (
variable_names_blacklist.replace(" ", "").split(",")
if variable_names_blacklist else None)
if input_meta_graph_def:
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_meta_graph_def.graph_def,
output_node_names.replace(" ", "").split(","),
variable_names_whitelist=variable_names_whitelist,
variable_names_blacklist=variable_names_blacklist)
else:
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_graph_def,
output_node_names.replace(" ", "").split(","),
variable_names_whitelist=variable_names_whitelist,
variable_names_blacklist=variable_names_blacklist)
# Write GraphDef to file if output path has been given.
if output_graph:
with gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
return output_graph_def
def _parse_input_graph_proto(input_graph, input_binary):
"""Parses the input TensorFlow graph file into a GraphDef proto."""
if not gfile.Exists(input_graph):
print("Input graph file '" + input_graph + "' does not exist!")
return -1
input_graph_def = graph_pb2.GraphDef()
mode = "rb" if input_binary else "r"
with gfile.GFile(input_graph, mode) as f:
if input_binary:
input_graph_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), input_graph_def)
return input_graph_def
def _parse_input_meta_graph_proto(input_graph, input_binary):
"""Parses the input TensorFlow meta graph file into a MetaGraphDef proto."""
if not gfile.Exists(input_graph):
print("Input meta graph file '" + input_graph + "' does not exist!")
return -1
input_meta_graph_def = MetaGraphDef()
mode = "rb" if input_binary else "r"
with gfile.GFile(input_graph, mode) as f:
if input_binary:
input_meta_graph_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), input_meta_graph_def)
print("Loaded meta graph file '" + input_graph + "'")
return input_meta_graph_def
def _parse_input_saver_proto(input_saver, input_binary):
"""Parses the input TensorFlow Saver file into a SaverDef proto."""
if not gfile.Exists(input_saver):
print("Input saver file '" + input_saver + "' does not exist!")
return -1
mode = "rb" if input_binary else "r"
with gfile.GFile(input_saver, mode) as f:
saver_def = saver_pb2.SaverDef()
if input_binary:
saver_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), saver_def)
return saver_def
def freeze_graph(input_graph,
input_saver,
input_binary,
input_checkpoint,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph,
clear_devices,
initializer_nodes,
variable_names_whitelist="",
variable_names_blacklist="",
input_meta_graph=None,
input_saved_model_dir=None,
saved_model_tags=tag_constants.SERVING,
checkpoint_version=saver_pb2.SaverDef.V2):
"""Converts all variables in a graph and checkpoint into constants.
Args:
input_graph: A `GraphDef` file to load.
input_saver: A TensorFlow Saver file.
input_binary: A Bool. True means input_graph is .pb, False indicates .pbtxt.
input_checkpoint: The prefix of a V1 or V2 checkpoint, with V2 taking
priority. Typically the result of `Saver.save()` or that of
`tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or
V1/V2.
output_node_names: The name(s) of the output nodes, comma separated.
restore_op_name: Unused.
filename_tensor_name: Unused.
output_graph: String where to write the frozen `GraphDef`.
clear_devices: A Bool whether to remove device specifications.
initializer_nodes: Comma separated list of initializer nodes to run before
freezing.
variable_names_whitelist: The set of variable names to convert (optional, by
default, all variables are converted).
variable_names_blacklist: The set of variable names to omit converting
to constants (optional).
input_meta_graph: A `MetaGraphDef` file to load (optional).
input_saved_model_dir: Path to the dir with TensorFlow 'SavedModel' file and
variables (optional).
saved_model_tags: Group of comma separated tag(s) of the MetaGraphDef to
load, in string format.
checkpoint_version: Tensorflow variable file format (saver_pb2.SaverDef.V1
or saver_pb2.SaverDef.V2).
Returns:
String that is the location of frozen GraphDef.
"""
input_graph_def = None
if input_saved_model_dir:
input_graph_def = saved_model_utils.get_meta_graph_def(
input_saved_model_dir, saved_model_tags).graph_def
elif input_graph:
input_graph_def = _parse_input_graph_proto(input_graph, input_binary)
input_meta_graph_def = None
if input_meta_graph:
input_meta_graph_def = _parse_input_meta_graph_proto(
input_meta_graph, input_binary)
input_saver_def = None
if input_saver:
input_saver_def = _parse_input_saver_proto(input_saver, input_binary)
freeze_graph_with_def_protos(
input_graph_def,
input_saver_def,
input_checkpoint,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph,
clear_devices,
initializer_nodes,
variable_names_whitelist,
variable_names_blacklist,
input_meta_graph_def,
input_saved_model_dir,
saved_model_tags.replace(" ", "").split(","),
checkpoint_version=checkpoint_version)
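# Illustrative Python-API usage sketch (the file paths and node name mirror
# the command-line example in the module docstring and are assumptions; they
# must point at real files):
#
#     freeze_graph(input_graph="some_graph_def.pb",
#                  input_saver="",
#                  input_binary=True,
#                  input_checkpoint="model.ckpt-8361242",
#                  output_node_names="softmax",
#                  restore_op_name="",
#                  filename_tensor_name="",
#                  output_graph="/tmp/frozen_graph.pb",
#                  clear_devices=True,
#                  initializer_nodes="")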
def main(unused_args, flags):
if flags.checkpoint_version == 1:
checkpoint_version = saver_pb2.SaverDef.V1
elif flags.checkpoint_version == 2:
checkpoint_version = saver_pb2.SaverDef.V2
else:
print("Invalid checkpoint version (must be '1' or '2'): %d" %
flags.checkpoint_version)
return -1
freeze_graph(flags.input_graph, flags.input_saver, flags.input_binary,
flags.input_checkpoint, flags.output_node_names,
flags.restore_op_name, flags.filename_tensor_name,
flags.output_graph, flags.clear_devices, flags.initializer_nodes,
flags.variable_names_whitelist, flags.variable_names_blacklist,
flags.input_meta_graph, flags.input_saved_model_dir,
flags.saved_model_tags, checkpoint_version)
def run_main():
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--input_graph",
type=str,
default="",
help="TensorFlow \'GraphDef\' file to load.")
parser.add_argument(
"--input_saver",
type=str,
default="",
help="TensorFlow saver file to load.")
parser.add_argument(
"--input_checkpoint",
type=str,
default="",
help="TensorFlow variables file to load.")
parser.add_argument(
"--checkpoint_version",
type=int,
default=2,
help="Tensorflow variable file format")
parser.add_argument(
"--output_graph",
type=str,
default="",
help="Output \'GraphDef\' file name.")
parser.add_argument(
"--input_binary",
nargs="?",
const=True,
type="bool",
default=False,
help="Whether the input files are in binary format.")
parser.add_argument(
"--output_node_names",
type=str,
default="",
help="The name of the output nodes, comma separated.")
parser.add_argument(
"--restore_op_name",
type=str,
default="save/restore_all",
help="""\
The name of the master restore operator. Deprecated, unused by updated \
loading code.
""")
parser.add_argument(
"--filename_tensor_name",
type=str,
default="save/Const:0",
help="""\
The name of the tensor holding the save path. Deprecated, unused by \
updated loading code.
""")
parser.add_argument(
"--clear_devices",
nargs="?",
const=True,
type="bool",
default=True,
help="Whether to remove device specifications.")
parser.add_argument(
"--initializer_nodes",
type=str,
default="",
help="Comma separated list of initializer nodes to run before freezing.")
parser.add_argument(
"--variable_names_whitelist",
type=str,
default="",
help="""\
Comma separated list of variables to convert to constants. If specified, \
only those variables will be converted to constants.\
""")
parser.add_argument(
"--variable_names_blacklist",
type=str,
default="",
help="""\
Comma separated list of variables to skip converting to constants.\
""")
parser.add_argument(
"--input_meta_graph",
type=str,
default="",
help="TensorFlow \'MetaGraphDef\' file to load.")
parser.add_argument(
"--input_saved_model_dir",
type=str,
default="",
help="Path to the dir with TensorFlow \'SavedModel\' file and variables.")
parser.add_argument(
"--saved_model_tags",
type=str,
default="serve",
help="""\
Group of tag(s) of the MetaGraphDef to load, in string format,\
separated by \',\'. For tag-set contains multiple tags, all tags \
must be passed in.\
""")
flags, unparsed = parser.parse_known_args()
my_main = lambda unused_args: main(unused_args, flags)
app.run(main=my_main, argv=[sys.argv[0]] + unparsed)
if __name__ == '__main__':
run_main()
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import json
import time
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from datetime import date, datetime
from decimal import Decimal
from MySQLdb.constants import FIELD_TYPE
from tempfile import NamedTemporaryFile
from six import string_types, binary_type
PY3 = sys.version_info[0] == 3
class MySqlToGoogleCloudStorageOperator(BaseOperator):
"""
Copy data from MySQL to Google cloud storage in JSON format.
"""
template_fields = ('sql', 'bucket', 'filename', 'schema_filename', 'schema')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self,
sql,
bucket,
filename,
schema_filename=None,
approx_max_file_size_bytes=1900000000,
mysql_conn_id='mysql_default',
google_cloud_storage_conn_id='google_cloud_default',
schema=None,
delegate_to=None,
*args,
**kwargs):
"""
:param sql: The SQL to execute on the MySQL table.
:type sql: string
:param bucket: The bucket to upload to.
:type bucket: string
:param filename: The filename to use as the object name when uploading
to Google cloud storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: string
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from MySQL.
:type schema_filename: string
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). Google cloud storage allows for files
to be a maximum of 4GB. This param allows developers to specify the
file size of the splits.
:type approx_max_file_size_bytes: long
:param mysql_conn_id: Reference to a specific MySQL hook.
:type mysql_conn_id: string
:param google_cloud_storage_conn_id: Reference to a specific Google
cloud storage hook.
:type google_cloud_storage_conn_id: string
:param schema: The schema to use, if any. Should be a list of dicts or
a str. Examples can be seen at: https://cloud.google.com/bigquery
/docs/schemas#specifying_a_json_schema_file
:type schema: str or list
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
"""
super(MySqlToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.mysql_conn_id = mysql_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.schema = schema
self.delegate_to = delegate_to
def execute(self, context):
cursor = self._query_mysql()
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
files_to_upload.update(self._write_local_schema_file(cursor))
# Flush all files before uploading
for file_handle in files_to_upload.values():
file_handle.flush()
self._upload_to_gcs(files_to_upload)
# Close all temp file handles.
for file_handle in files_to_upload.values():
file_handle.close()
def _query_mysql(self):
"""
Queries mysql and returns a cursor to the results.
"""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
class BinaryTypeEncoder(json.JSONEncoder):
def default(self, obj):
if PY3 and isinstance(obj, binary_type):
return str(obj, 'utf-8')
return json.JSONEncoder.default(self, obj)
schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles = {self.filename.format(file_no): tmp_file_handle}
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats
row = map(self.convert_types, row)
row_dict = dict(zip(schema, row))
# TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
s = json.dumps(row_dict, cls=BinaryTypeEncoder)
if PY3:
s = s.encode('utf-8')
tmp_file_handle.write(s)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle
return tmp_file_handles
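# Illustrative sketch of the split-file naming (the pattern is an assumption):
# each time approx_max_file_size_bytes is exceeded, a new object name is
# produced from the `{}` placeholder documented in __init__, e.g.:
#
#     'exports/my_table_{}.json'.format(0)   # -> 'exports/my_table_0.json'
#     'exports/my_table_{}.json'.format(1)   # -> 'exports/my_table_1.json'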
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema_str = None
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
if self.schema is not None and isinstance(self.schema, string_types):
schema_str = self.schema
else:
schema = []
if self.schema is not None and isinstance(self.schema, list):
schema = self.schema
else:
for field in cursor.description:
# See PEP 249 for details about the description tuple.
field_name = field[0]
field_type = self.type_map(field[1])
# Always allow TIMESTAMP to be nullable. MySQLdb returns None types
# for required fields because some MySQL timestamps can't be
# represented by Python's datetime (e.g. 0000-00-00 00:00:00).
if field[6] or field_type == 'TIMESTAMP':
field_mode = 'NULLABLE'
else:
field_mode = 'REQUIRED'
schema.append({
'name': field_name,
'type': field_type,
'mode': field_mode,
})
schema_str = json.dumps(schema)
if PY3:
schema_str = schema_str.encode('utf-8')
tmp_schema_file_handle.write(schema_str)
self.log.info('Using schema for %s: %s', self.schema_filename, schema_str)
return {self.schema_filename: tmp_schema_file_handle}
def _upload_to_gcs(self, files_to_upload):
"""
Upload all of the file splits (and optionally the schema .json file) to
Google cloud storage.
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
for object_name, tmp_file_handle in files_to_upload.items():
hook.upload(self.bucket, object_name, tmp_file_handle.name, 'application/json')
@classmethod
def convert_types(cls, value):
"""
Takes a value from MySQLdb, and converts it to a value that's safe for
JSON/Google cloud storage/BigQuery. Dates are converted to UTC seconds.
Decimals are converted to floats.
"""
if type(value) in (datetime, date):
return time.mktime(value.timetuple())
elif isinstance(value, Decimal):
return float(value)
else:
return value
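# Illustrative sketch of the conversions above (sample values are assumptions;
# note that time.mktime interprets the timetuple in local time, so the exact
# float depends on the machine's timezone):
#
#     MySqlToGoogleCloudStorageOperator.convert_types(date(2018, 1, 1))  # seconds since epoch (float)
#     MySqlToGoogleCloudStorageOperator.convert_types(Decimal('1.25'))   # 1.25
#     MySqlToGoogleCloudStorageOperator.convert_types('unchanged')       # 'unchanged'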
@classmethod
def type_map(cls, mysql_type):
"""
Helper function that maps from MySQL fields to BigQuery fields. Used
when a schema_filename is set.
"""
d = {
FIELD_TYPE.INT24: 'INTEGER',
FIELD_TYPE.TINY: 'INTEGER',
FIELD_TYPE.BIT: 'INTEGER',
FIELD_TYPE.DATETIME: 'TIMESTAMP',
FIELD_TYPE.DATE: 'TIMESTAMP',
FIELD_TYPE.DECIMAL: 'FLOAT',
FIELD_TYPE.NEWDECIMAL: 'FLOAT',
FIELD_TYPE.DOUBLE: 'FLOAT',
FIELD_TYPE.FLOAT: 'FLOAT',
FIELD_TYPE.LONG: 'INTEGER',
FIELD_TYPE.LONGLONG: 'INTEGER',
FIELD_TYPE.SHORT: 'INTEGER',
FIELD_TYPE.TIMESTAMP: 'TIMESTAMP',
FIELD_TYPE.YEAR: 'INTEGER',
}
return d[mysql_type] if mysql_type in d else 'STRING'
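# Illustrative DAG usage sketch (connection ids, bucket, and file names are
# assumptions; an operator instance like this would normally live in a DAG
# definition file rather than in this module):
#
#     extract = MySqlToGoogleCloudStorageOperator(
#         task_id='mysql_to_gcs',
#         sql='SELECT * FROM my_table',
#         bucket='my-bucket',
#         filename='exports/my_table_{}.json',
#         schema_filename='schemas/my_table.json',
#         mysql_conn_id='mysql_default',
#         google_cloud_storage_conn_id='google_cloud_default',
#         dag=dag)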
|